diff --git a/.clang-tidy.in b/.clang-tidy.in
index 31cd6eb31372c..838c09d8873ad 100644
--- a/.clang-tidy.in
+++ b/.clang-tidy.in
@@ -26,6 +26,7 @@ Checks: '-*,
bugprone-undelegated-constructor,
hicpp-static-assert,
hicpp-undelegated-constructor,
+ misc-static-assert,
misc-uniqueptr-reset-release,
modernize-avoid-bind,
modernize-deprecated-headers,
@@ -33,18 +34,28 @@ Checks: '-*,
modernize-replace-random-shuffle,
modernize-shrink-to-fit,
modernize-unary-static-assert,
+ mongo-assert-check,
mongo-cctype-check,
+ mongo-config-header-check,
+ mongo-collection-sharding-runtime-check,
+ mongo-cxx20-banned-includes-check,
+ mongo-cxx20-std-chrono-check,
mongo-header-bracket-check,
- mongo-std-atomic-check,
+ mongo-macro-definition-leaks-check,
mongo-mutex-check,
- mongo-assert-check,
+ mongo-polyfill-check,
+ mongo-rand-check,
+ mongo-std-atomic-check,
mongo-std-optional-check,
+ mongo-trace-check,
mongo-uninterruptible-lock-guard-check,
+ mongo-unstructured-log-check,
mongo-volatile-check,
- mongo-trace-check,
+ mongo-fcv-constant-check,
performance-faster-string-find,
performance-implicit-conversion-in-loop,
performance-inefficient-algorithm,
+ performance-no-automatic-move,
bugprone-signed-char-misuse,
bugprone-suspicious-string-compare,
performance-for-range-copy,
@@ -94,7 +105,6 @@ Checks: '-*,
-misc-misplaced-const,
-misc-non-copyable-objects,
-misc-redundant-expression,
- -misc-static-assert,
-misc-throw-by-value-catch-by-reference,
-misc-unconventional-assign-operator,
-misc-unused-alias-decls,
@@ -126,6 +136,8 @@ CheckOptions:
value: assert
- key: mongo-header-bracket-check.mongoSourceDirs
value: 'src/mongo;@MONGO_BUILD_DIR@'
+ - key: mongo-collection-sharding-runtime-check.exceptionDirs
+ value: 'src/mongo/db/s'
- key: bugprone-assert-side-effect.CheckFunctionCalls
value: '0'
- key: bugprone-dangling-handle.HandleClasses
diff --git a/.gdbinit b/.gdbinit
index 6a43ac3fe00d5..2259ac16e39d4 100644
--- a/.gdbinit
+++ b/.gdbinit
@@ -13,3 +13,6 @@ source buildscripts/gdb/mongo_lock.py
# Load methods for printing in-memory contents of WT tables.
source buildscripts/gdb/wt_dump_table.py
+
+# Load third-party pretty printers
+source src/third_party/immer/dist/tools/gdb_pretty_printers/autoload.py
diff --git a/.gitignore b/.gitignore
index ce851fdf7b29b..efe5d4811e7a7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,8 @@
venv
*~
+*.test_iwyu.h
+*.test_iwyu.cpp
*.swp
*.o
*.os
@@ -44,6 +46,7 @@ venv
*.eslintcache
*#
.#*
+iwyu.dat
/src/mongo/*/*Debug*/
/src/mongo/*/*/*Debug*/
@@ -60,6 +63,7 @@ venv
/src/third_party/*/*.lastbuildstate
/buildscripts/libdeps/graph_visualizer_web_stack/build
/buildscripts/libdeps/graph_visualizer_web_stack/node_modules
+buildscripts/iwyu/test/*/test_run
libdeps.graphml
build-metrics.json
config.log
@@ -82,20 +86,26 @@ scratch
# binaries
/docgen*
/loadgen*
+/mongoed*
+/mongogrid*
+/mongoperf*
+/mongoshim*
+/mongosniff*
+/mongotrafficreader*
+
+# binaries from db-contrib-tool
+/ksdecode*
/mongo*
+/mongoauditdecrypt*
/mongobridge*
/mongocryptd*
/mongod*
-/mongoed*
-/mongogrid*
+/mongodecrypt*
/mongokerberos*
/mongoldap*
-/mongoperf*
+/mongoqd*
/mongos*
-/mongoshim*
-/mongosniff*
/mongotmock*
-/mongotrafficreader*
/mqlrun*
/wt*
@@ -258,3 +268,6 @@ dist-test/
# node extra stuff (for someone installing eslint)
node_modules/
package-lock.json
+
+# jstestfuzz generated test directory
+jstestfuzz/
diff --git a/CreativeCommons.txt b/CreativeCommons.txt
index 60efd96a51d45..1b3c7c92156ad 100644
--- a/CreativeCommons.txt
+++ b/CreativeCommons.txt
@@ -189,4 +189,4 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
licensed here. There are no understandings, agreements or representations with respect to the
Work not specified here. Licensor shall not be bound by any additional provisions that may
appear in any communication from You. This License may not be modified without the mutual
- written agreement of the Licensor and You.
\ No newline at end of file
+ written agreement of the Licensor and You.
diff --git a/README.md b/README.md
index 491f729ce246e..b441e5a6116e8 100644
--- a/README.md
+++ b/README.md
@@ -30,12 +30,12 @@ Welcome to MongoDB!
To run a single server database:
```bash
- $ sudo mkdir -p /data/db
- $ ./mongod
- $
- $ # The mongo javascript shell connects to localhost and test database by default:
- $ ./mongo
- > help
+ $ sudo mkdir -p /data/db
+ $ ./mongod
+ $
+ $ # The mongo javascript shell connects to localhost and test database by default:
+ $ ./mongo
+ > help
```
## Installing Compass
@@ -43,7 +43,7 @@ Welcome to MongoDB!
You can install compass using the `install_compass` script packaged with MongoDB:
```bash
- $ ./install_compass
+ $ ./install_compass
```
This will download the appropriate MongoDB Compass package for your platform
@@ -66,9 +66,9 @@ Welcome to MongoDB!
## Learn MongoDB
- Documentation - https://docs.mongodb.com/manual/
- Developer Center - https://www.mongodb.com/developer/
- MongoDB University - https://learn.mongodb.com
+ - Documentation - https://docs.mongodb.com/manual/
+ - Developer Center - https://www.mongodb.com/developer/
+ - MongoDB University - https://learn.mongodb.com
## Cloud Hosted MongoDB
diff --git a/README.third_party.md b/README.third_party.md
index 8454a55dad89d..f267ef8b6c637 100644
--- a/README.third_party.md
+++ b/README.third_party.md
@@ -31,6 +31,7 @@ a notice will be included in
| [GPerfTools] | BSD-3-Clause | 2.9.1 | | ✗ |
| [gRPC] | Apache-2.0 | 1.46.6 | | ✗ |
| [ICU4] | ICU | 57.1 | ✗ | ✗ |
+| [immer] | BSL-1.0 | d98a68c | | ✗ |
| [Intel Decimal FP Library] | BSD-3-Clause | 2.0 Update 1 | | ✗ |
| [JSON-Schema-Test-Suite] | MIT | 728066f9c5 | | |
| [libstemmer] | BSD-3-Clause | Unknown | ✗ | ✗ |
@@ -53,11 +54,10 @@ a notice will be included in
| [Unicode] | Unicode-DFS-2015 | 8.0.0 | ✗ | ✗ |
| [libunwind] | MIT | 1.6.2 + changes | | ✗ |
| [Valgrind] | BSD-3-Clause\[1] | 3.17.0 | | ✗ |
-| [variant] | BSL-1.0 | 1.4.0 | | ✗ |
| [wiredtiger] | | \[2] | ✗ | ✗ |
| [yaml-cpp] | MIT | 0.6.2 | | ✗ |
| [Zlib] | Zlib | 1.2.13 | ✗ | ✗ |
-| [Zstandard] | BSD-3-Clause | 1.5.2 | ✗ | ✗ |
+| [Zstandard] | BSD-3-Clause | 1.5.5 | ✗ | ✗ |
[abseil-cpp]: https://github.com/abseil/abseil-cpp
[ASIO]: https://github.com/chriskohlhoff/asio
@@ -66,6 +66,7 @@ a notice will be included in
[fmt]: http://fmtlib.net/
[GPerfTools]: https://github.com/gperftools/gperftools
[ICU4]: http://site.icu-project.org/download/
+[immer]: https://github.com/arximboldi/immer
[Intel Decimal FP Library]: https://software.intel.com/en-us/articles/intel-decimal-floating-point-math-library
[JSON-Schema-Test-Suite]: https://github.com/json-schema-org/JSON-Schema-Test-Suite
[libstemmer]: https://github.com/snowballstem/snowball
@@ -86,7 +87,6 @@ a notice will be included in
[Unicode]: http://www.unicode.org/versions/enumeratedversions.html
[libunwind]: http://www.nongnu.org/libunwind/
[Valgrind]: http://valgrind.org/downloads/current.html
-[variant]: https://github.com/mpark/variant
[wiredtiger]: https://github.com/wiredtiger/wiredtiger
[yaml-cpp]: https://github.com/jbeder/yaml-cpp/releases
[Zlib]: https://zlib.net/
diff --git a/SConstruct b/SConstruct
index d1ef83dbb4467..e692f889c92eb 100644
--- a/SConstruct
+++ b/SConstruct
@@ -23,8 +23,6 @@ from pkg_resources import parse_version
import SCons
import SCons.Script
-from mongo_tooling_metrics.client import get_mongo_metrics_client
-from mongo_tooling_metrics.errors import ExternalHostException
from mongo_tooling_metrics.lib.top_level_metrics import SConsToolingMetrics
from site_scons.mongo import build_profiles
@@ -422,6 +420,12 @@ add_option(
nargs=0,
)
+add_option(
+ 'wait-for-debugger',
+ help='Wait for debugger attach on process startup',
+ nargs=0,
+)
+
add_option(
'gcov',
help='compile with flags for gcov',
@@ -593,8 +597,8 @@ add_option(
add_option(
"cxx-std",
- choices=["17", "20"],
- default="17",
+ choices=["20"],
+ default="20",
help="Select the C++ language standard to build with",
)
@@ -616,6 +620,13 @@ add_option(
help="Specify variables files to load.",
)
+add_option(
+ 'streams-release-build',
+ default=False,
+ action='store_true',
+ help='If set, will include the enterprise streams module in a release build.',
+)
+
link_model_choices = ['auto', 'object', 'static', 'dynamic', 'dynamic-strict', 'dynamic-sdk']
add_option(
'link-model',
@@ -910,6 +921,7 @@ def variable_tools_converter(val):
"mongo_integrationtest",
"mongo_unittest",
"mongo_libfuzzer",
+ "mongo_pretty_printer_tests",
"textfile",
]
@@ -1634,6 +1646,8 @@ envDict = dict(
# TODO: Move unittests.txt to $BUILD_DIR, but that requires
# changes to MCI.
UNITTEST_LIST='$BUILD_ROOT/unittests.txt',
+ PRETTY_PRINTER_TEST_ALIAS='install-pretty-printer-tests',
+ PRETTY_PRINTER_TEST_LIST='$BUILD_ROOT/pretty_printer_tests.txt',
LIBFUZZER_TEST_ALIAS='install-fuzzertests',
LIBFUZZER_TEST_LIST='$BUILD_ROOT/libfuzzer_tests.txt',
INTEGRATION_TEST_ALIAS='install-integration-tests',
@@ -1659,22 +1673,13 @@ env.AddMethod(lambda env, name, **kwargs: add_option(name, **kwargs), 'AddOption
# The placement of this is intentional. Here we setup an atexit method to store tooling metrics.
# We should only register this function after env, env_vars and the parser have been properly initialized.
-try:
- metrics_client = get_mongo_metrics_client()
- metrics_client.register_metrics(
- SConsToolingMetrics,
- utc_starttime=datetime.utcnow(),
- artifact_dir=env.Dir('$BUILD_DIR').get_abspath(),
- env_vars=env_vars,
- env=env,
- parser=_parser,
- )
-except ExternalHostException as _:
- pass
-except Exception as _:
- print(
- "This MongoDB Virtual Workstation could not connect to the internal cluster\nThis is a non-issue, but if this message persists feel free to reach out in #server-dev-platform"
- )
+SConsToolingMetrics.register_metrics(
+ utc_starttime=datetime.utcnow(),
+ artifact_dir=env.Dir('$BUILD_DIR').get_abspath(),
+ env_vars=env_vars,
+ env=env,
+ parser=_parser,
+)
if get_option('build-metrics'):
env['BUILD_METRICS_ARTIFACTS_DIR'] = '$BUILD_ROOT/$VARIANT_DIR'
@@ -2021,12 +2026,16 @@ if env.get('ENABLE_OOM_RETRY'):
': out of memory',
'virtual memory exhausted: Cannot allocate memory',
': fatal error: Killed signal terminated program cc1',
+ # TODO: SERVER-77322 remove this non memory related ICE.
+ r'during IPA pass: cp.+g\+\+: internal compiler error',
+ 'ld terminated with signal 9',
]
elif env.ToolchainIs('msvc'):
env['OOM_RETRY_MESSAGES'] = [
'LNK1102: out of memory',
'C1060: compiler is out of heap space',
- 'LNK1171: unable to load mspdbcore.dll',
+ 'c1xx : fatal error C1063: INTERNAL COMPILER ERROR',
+ r'LNK1171: unable to load mspdbcore\.dll',
"LNK1201: error writing to program database ''",
]
env['OOM_RETRY_RETURNCODES'] = [1102]
@@ -3250,12 +3259,6 @@ if not env.TargetOSIs('windows', 'macOS') and (env.ToolchainIs('GCC', 'clang')):
for flag_value in env[search_variable]):
env.Append(CCFLAGS=[f'{targeting_flag}{targeting_flag_value}'])
-# Needed for auth tests since key files are stored in git with mode 644.
-if not env.TargetOSIs('windows'):
- for keysuffix in ["1", "2", "ForRollover"]:
- keyfile = "jstests/libs/key%s" % keysuffix
- os.chmod(keyfile, stat.S_IWUSR | stat.S_IRUSR)
-
# boostSuffixList is used when using system boost to select a search sequence
# for boost libraries.
boostSuffixList = ["-mt", ""]
@@ -3666,6 +3669,9 @@ def doConfigure(myenv):
# Don't issue warnings about potentially evaluated expressions
myenv.AddToCCFLAGSIfSupported("-Wno-potentially-evaluated-expression")
+ # SERVER-76472 we don't try to maintain ABI so disable warnings about possible ABI issues.
+ myenv.AddToCCFLAGSIfSupported("-Wno-psabi")
+
# Warn about moves of prvalues, which can inhibit copy elision.
myenv.AddToCXXFLAGSIfSupported("-Wpessimizing-move")
@@ -3705,15 +3711,6 @@ def doConfigure(myenv):
# only) flag that turns it on.
myenv.AddToCXXFLAGSIfSupported("-Wunused-exception-parameter")
- # TODO(SERVER-60151): Avoid the dilemma identified in
- # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100493. Unfortunately,
- # we don't have a more targeted warning suppression we can use
- # other than disabling all deprecation warnings. We will
- # revisit this once we are fully on C++20 and can commit the
- # C++20 style code.
- if get_option('cxx-std') == "20":
- myenv.AddToCXXFLAGSIfSupported('-Wno-deprecated')
-
# TODO SERVER-58675 - Remove this suppression after abseil is upgraded
myenv.AddToCXXFLAGSIfSupported("-Wno-deprecated-builtins")
@@ -3827,6 +3824,9 @@ def doConfigure(myenv):
usingLibStdCxx = False
if has_option('libc++'):
+ # TODO SERVER-54659 - ASIO depends on std::result_of which was removed in C++ 20
+ myenv.Append(CPPDEFINES=["ASIO_HAS_STD_INVOKE_RESULT"])
+
if not myenv.ToolchainIs('clang'):
myenv.FatalError('libc++ is currently only supported for clang')
if myenv.AddToCXXFLAGSIfSupported('-stdlib=libc++'):
@@ -3859,37 +3859,18 @@ def doConfigure(myenv):
conf.Finish()
if myenv.ToolchainIs('msvc'):
- if get_option('cxx-std') == "17":
- myenv.AppendUnique(CCFLAGS=['/std:c++17',
- '/Zc:lambda']) # /Zc:lambda is implied by /std:c++20
- elif get_option('cxx-std') == "20":
+ if get_option('cxx-std') == "20":
myenv.AppendUnique(CCFLAGS=['/std:c++20'])
else:
- if get_option('cxx-std') == "17":
- if not myenv.AddToCXXFLAGSIfSupported('-std=c++17'):
- myenv.ConfError('Compiler does not honor -std=c++17')
- elif get_option('cxx-std') == "20":
+ if get_option('cxx-std') == "20":
if not myenv.AddToCXXFLAGSIfSupported('-std=c++20'):
myenv.ConfError('Compiler does not honor -std=c++20')
if not myenv.AddToCFLAGSIfSupported('-std=c11'):
- myenv.ConfError("C++17 mode selected for C++ files, but can't enable C11 for C files")
+ myenv.ConfError("C++20 mode selected for C++ files, but can't enable C11 for C files")
if using_system_version_of_cxx_libraries():
- print('WARNING: System versions of C++ libraries must be compiled with C++17 support')
-
- def CheckCxx17(context):
- test_body = """
- #if __cplusplus < 201703L
- #error
- #endif
- namespace NestedNamespaceDecls::AreACXX17Feature {};
- """
-
- context.Message('Checking for C++17... ')
- ret = context.TryCompile(textwrap.dedent(test_body), ".cpp")
- context.Result(ret)
- return ret
+ print('WARNING: System versions of C++ libraries must be compiled with C++20 support')
def CheckCxx20(context):
test_body = """
@@ -3909,15 +3890,12 @@ def doConfigure(myenv):
myenv,
help=False,
custom_tests={
- 'CheckCxx17': CheckCxx17,
'CheckCxx20': CheckCxx20,
},
)
- if get_option('cxx-std') == "17" and not conf.CheckCxx17():
- myenv.ConfError('C++17 support is required to build MongoDB')
- elif get_option('cxx-std') == "20" and not conf.CheckCxx20():
- myenv.ConfError('C++20 support was not detected')
+ if get_option('cxx-std') == "20" and not conf.CheckCxx20():
+ myenv.ConfError('C++20 support is required to build MongoDB')
conf.Finish()
@@ -4390,6 +4368,11 @@ def doConfigure(myenv):
"Cannot use libunwind with TSAN, please add --use-libunwind=off to your compile flags"
)
+ # We add suppressions based on the library file in etc/tsan.suppressions,
+ # so the link-model needs to be dynamic.
+ if not link_model.startswith('dynamic'):
+ env.FatalError("TSAN is only supported with dynamic link models")
+
# If anything is changed, added, or removed in
# tsan_options, be sure to make the corresponding changes
# to the appropriate build variants in etc/evergreen.yml
@@ -6038,9 +6021,37 @@ env.AddPackageNameAlias(
name="mh-debugsymbols",
)
+env.AutoInstall(
+ target='$PREFIX',
+ source='$PRETTY_PRINTER_TEST_LIST',
+ AIB_ROLE='runtime',
+ AIB_COMPONENT='pretty-printer-tests',
+ AIB_COMPONENTS_EXTRA=['dist-test'],
+)
+
env['RPATH_ESCAPED_DOLLAR_ORIGIN'] = '\\$$$$ORIGIN'
+def isSupportedStreamsPlatform(thisEnv):
+ # TODO https://jira.mongodb.org/browse/SERVER-74961: Support other platforms.
+ # Only Linux x86_64 and aarch64 with the OpenSSL provider are supported.
+ return thisEnv.TargetOSIs('linux') and \
+ thisEnv['TARGET_ARCH'] in ('x86_64', 'aarch64') \
+ and ssl_provider == 'openssl'
+
+
+def shouldBuildStreams(thisEnv):
+ if releaseBuild:
+ # The streaming enterprise module and dependencies are only included in release builds
+ # when streams-release-build is set.
+ return get_option('streams-release-build') and isSupportedStreamsPlatform(thisEnv)
+ else:
+ return isSupportedStreamsPlatform(thisEnv)
+
+
+env.AddMethod(shouldBuildStreams, 'ShouldBuildStreams')
+
+
def prefix_libdir_rpath_generator(env, source, target, for_signature):
# If the PREFIX_LIBDIR has an absolute path, we will use that directly as
# RPATH because that indicates the final install destination of the libraries.
@@ -6194,7 +6205,7 @@ sconslinters = env.Command(
lint_py = env.Command(
target="#lint-lint.py",
- source=["buildscripts/quickcpplint.py"],
+ source=["buildscripts/quickmongolint.py"],
action="$PYTHON ${SOURCES[0]} lint",
)
@@ -6364,7 +6375,7 @@ if get_option('ninja') == 'disabled':
compileCommands = env.CompilationDatabase('compile_commands.json')
# Initialize generated-sources Alias as a placeholder so that it can be used as a
# dependency for compileCommands. This Alias will be properly updated in other SConscripts.
- env.Requires(compileCommands, env.Alias("generated-sources"))
+ env.Depends(compileCommands, env.Alias("generated-sources"))
compileDb = env.Alias("compiledb", compileCommands)
msvc_version = ""
@@ -6426,6 +6437,41 @@ if env.get('UNITTESTS_COMPILE_CONCURRENCY'):
source_file_regex=r"^.*_test\.cpp$",
)
+first_half_flag = False
+
+
+def half_source_emitter(target, source, env):
+ global first_half_flag
+ if first_half_flag:
+ first_half_flag = False
+ if not 'conftest' in str(target[0]) and not str(source[0]).endswith('_test.cpp'):
+ env.Alias('compile_first_half_non_test_source', target)
+ else:
+ first_half_flag = True
+ return target, source
+
+
+# Cribbed from Tool/cc.py and Tool/c++.py. It would be better if
+# we could obtain this from SCons.
+_CSuffixes = [".c"]
+if not SCons.Util.case_sensitive_suffixes(".c", ".C"):
+ _CSuffixes.append(".C")
+
+_CXXSuffixes = [".cpp", ".cc", ".cxx", ".c++", ".C++"]
+if SCons.Util.case_sensitive_suffixes(".c", ".C"):
+ _CXXSuffixes.append(".C")
+
+for object_builder in SCons.Tool.createObjBuilders(env):
+ emitterdict = object_builder.builder.emitter
+ for suffix in emitterdict.keys():
+ if not suffix in _CSuffixes + _CXXSuffixes:
+ continue
+ base = emitterdict[suffix]
+ emitterdict[suffix] = SCons.Builder.ListEmitter([
+ base,
+ half_source_emitter,
+ ])
+
# Keep this late in the game so that we can investigate attributes set by all the tools that have run.
if has_option("cache"):
if get_option("cache") == "nolinked":
diff --git a/buildscripts/antithesis/base_images/workload/Dockerfile b/buildscripts/antithesis/base_images/workload/Dockerfile
index b8d46508252f4..d47e99d07c6ae 100644
--- a/buildscripts/antithesis/base_images/workload/Dockerfile
+++ b/buildscripts/antithesis/base_images/workload/Dockerfile
@@ -14,12 +14,11 @@ RUN debconf-set-selections /tmp/preseed.txt
RUN rm /tmp/preseed.txt
RUN apt-get update
+RUN DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -qy git-all wget build-essential checkinstall libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev libffi-dev zlib1g-dev
RUN apt-get install -qy libcurl4 libgssapi-krb5-2 libldap-2.4-2 libwrap0 libsasl2-2 libsasl2-modules libsasl2-modules-gssapi-mit openssl liblzma5 libssl-dev build-essential software-properties-common
-RUN add-apt-repository ppa:deadsnakes/ppa
-RUN apt-get update
# installs that need to be forced to be non-interactive: python 3.9 and git
-RUN DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -qy python3.9 python3.9-dev python3.9-venv git-all
+RUN bash -c "cd /opt && wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz && tar xzf Python-3.9.16.tgz && cd Python-3.9.16 && ./configure --enable-optimizations && make altinstall && rm -f /opt/Python-3.9.16.tgz"
# -------------------
# Everything above this line should be common image setup
diff --git a/buildscripts/antithesis_suite.py b/buildscripts/antithesis_suite.py
index bdf3a346070af..9c555cb0d4f76 100755
--- a/buildscripts/antithesis_suite.py
+++ b/buildscripts/antithesis_suite.py
@@ -15,6 +15,9 @@
"CleanEveryN",
"ContinuousStepdown",
"CheckOrphansDeleted",
+ # TODO SERVER-70396 re-enable hook once the checkMetadata feature flag is removed
+ # To check the feature flag we need to contact directly the config server that is not exposed in the ExternalFixture
+ # To check the feature flag we would need to contact the config server directly, but it is not exposed in the ExternalFixture
]
_SUITES_PATH = os.path.join("buildscripts", "resmokeconfig", "suites")
diff --git a/buildscripts/apply_clang_tidy_fixes.py b/buildscripts/apply_clang_tidy_fixes.py
index 9735a662097ef..bb0eab5b0ff82 100755
--- a/buildscripts/apply_clang_tidy_fixes.py
+++ b/buildscripts/apply_clang_tidy_fixes.py
@@ -53,10 +53,15 @@ def main():
# perform the swap replacement of the binary data
file_bytes = bytearray(file_bytes)
+ adjustments = 0
for replacement in fixes[recorded_md5]['replacements']:
- file_bytes[replacement['Offset']:replacement['Offset'] +
+
+ file_bytes[replacement['Offset'] + adjustments:replacement['Offset'] + adjustments +
replacement['Length']] = replacement['ReplacementText'].encode()
+ if replacement['Length'] != len(replacement['ReplacementText']):
+ adjustments += len(replacement['ReplacementText']) - replacement['Length']
+
with open(fixes[recorded_md5]['filepath'], 'wb') as fout:
fout.write(bytes(file_bytes))
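
The offset bookkeeping introduced above can be shown in isolation. The sketch below uses illustrative buffer contents; the Offset/Length/ReplacementText keys are the ones the script reads, and it demonstrates why each later offset must be shifted by the accumulated length delta of earlier replacements:

```python
# clang-tidy records replacement offsets against the original file contents, so
# once a replacement changes the buffer length, every later offset must be
# shifted by the accumulated delta.
file_bytes = bytearray(b"int x = NULL; int y = NULL;")
replacements = [
    {"Offset": 8, "Length": 4, "ReplacementText": "nullptr"},
    {"Offset": 22, "Length": 4, "ReplacementText": "nullptr"},
]
adjustments = 0
for replacement in replacements:
    start = replacement["Offset"] + adjustments
    file_bytes[start:start + replacement["Length"]] = replacement["ReplacementText"].encode()
    adjustments += len(replacement["ReplacementText"]) - replacement["Length"]
print(file_bytes.decode())  # int x = nullptr; int y = nullptr;
```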
diff --git a/buildscripts/backports_required_for_multiversion_tests_deduplicator.py b/buildscripts/backports_required_for_multiversion_tests_deduplicator.py
index ede797d4a5a8f..4ff42e9069d02 100644
--- a/buildscripts/backports_required_for_multiversion_tests_deduplicator.py
+++ b/buildscripts/backports_required_for_multiversion_tests_deduplicator.py
@@ -12,19 +12,20 @@
#
# Usage:
# Add the server ticket number and the path to the test file for the test you intend to denylist
-# under the appropriate suite. Any test in a (ticket, test_file) pair that appears in this file but
+# under the appropriate multiversion branch. Any test in a (ticket, test_file) pair that appears in this file but
# not in the last-lts or last-continuous branch version of this file indicates that a commit has
# not yet been backported to the last-lts or last-continuous branch and will be excluded from the
# multiversion suite corresponding to the root level suite key.
#
-# Example: To prevent 'my_test_file.js' from running in the 'replica_sets_multiversion' suite with the last-continuous binary
-# replica_sets_multiversion:
-# - ticket: SERVER-1000
-# test_file: jstests/core/my_test_file.js
+# Example: To prevent 'my_test_file.js' from running with the last-continuous binary
+# last-continuous:
+# all:
+# - test_file: jstests/core/my_test_file.js
+# ticket: SERVER-1000
#
# The above example will denylist jstests/core/my_test_file.js from the
-# 'replica_sets_multiversion_gen' task until this file has been updated with the same
-# (ticket, test_file) pair on the last-lts branch.
+# multiversion suites running with the last-continuous binary until this file has been updated with the same
+# (ticket, test_file) pair on the last-continuous branch.
#
"""
diff --git a/buildscripts/blackduck_hub.py b/buildscripts/blackduck_hub.py
index 025bbb1cbf0bb..16b6458e6fde0 100644
--- a/buildscripts/blackduck_hub.py
+++ b/buildscripts/blackduck_hub.py
@@ -1133,9 +1133,8 @@ def _verify_components_in_yaml(self):
for mcomp in self.third_party_components:
# These components are known to be missing from Black Duck
# Aladdin MD5 is a pair of C files for MD5 computation
- # timelib is simply missing
# Unicode is not code
- if mcomp.name in ["Aladdin MD5", "timelib", "unicode"]:
+ if mcomp.name in ["Aladdin MD5", "unicode"]:
continue
if mcomp.name not in comp_names:
diff --git a/buildscripts/burn_in_tests.py b/buildscripts/burn_in_tests.py
index f2ff469137fd7..231747e35a6a8 100755
--- a/buildscripts/burn_in_tests.py
+++ b/buildscripts/burn_in_tests.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python3
"""Command line utility for determining what jstests have been added or modified."""
+import collections
import copy
+import json
import logging
import os.path
import shlex
@@ -48,6 +50,8 @@
SELECTOR_FILE = "etc/burn_in_tests.yml"
SUITE_FILES = ["with_server"]
+BURN_IN_TEST_MEMBERSHIP_FILE = "burn_in_test_membership_map_file_for_ci.json"
+
SUPPORTED_TEST_KINDS = ("fsm_workload_test", "js_test", "json_schema_test",
"multi_stmt_txn_passthrough", "parallel_fsm_workload_test",
"all_versions_js_test")
@@ -182,7 +186,13 @@ def create_executor_list(suites, exclude_suites):
parameter. Returns a dict keyed by suite name / executor, value is tests
to run under that executor.
"""
- test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS)
+ try:
+ with open(BURN_IN_TEST_MEMBERSHIP_FILE) as file:
+ test_membership = collections.defaultdict(list, json.load(file))
+ LOGGER.info(f"Using cached test membership file {BURN_IN_TEST_MEMBERSHIP_FILE}.")
+ except FileNotFoundError:
+ LOGGER.info("Getting test membership data.")
+ test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS)
memberships = defaultdict(list)
for suite in suites:
@@ -611,7 +621,12 @@ def burn_in(self, repos: List[Repo], build_variant: str) -> None:
self.burn_in_executor.execute(tests_by_task)
-@click.command(context_settings=dict(ignore_unknown_options=True))
+@click.group()
+def cli():
+ pass
+
+
+@cli.command(context_settings=dict(ignore_unknown_options=True))
@click.option("--no-exec", "no_exec", default=False, is_flag=True,
help="Do not execute the found tests.")
@click.option("--build-variant", "build_variant", default=DEFAULT_VARIANT, metavar='BUILD_VARIANT',
@@ -635,11 +650,11 @@ def burn_in(self, repos: List[Repo], build_variant: str) -> None:
@click.option("--evg-project-file", "evg_project_file", default=DEFAULT_EVG_PROJECT_FILE,
help="Evergreen project config file")
@click.argument("resmoke_args", nargs=-1, type=click.UNPROCESSED)
-def main(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int],
- repeat_tests_min: Optional[int], repeat_tests_max: Optional[int],
- repeat_tests_secs: Optional[int], resmoke_args: str, verbose: bool,
- origin_rev: Optional[str], install_dir: Optional[str], use_yaml: bool,
- evg_project_file: Optional[str]) -> None:
+def run(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int],
+ repeat_tests_min: Optional[int], repeat_tests_max: Optional[int],
+ repeat_tests_secs: Optional[int], resmoke_args: str, verbose: bool,
+ origin_rev: Optional[str], install_dir: Optional[str], use_yaml: bool,
+ evg_project_file: Optional[str]) -> None:
"""
Run new or changed tests in repeated mode to validate their stability.
@@ -695,5 +710,27 @@ def main(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int],
burn_in_orchestrator.burn_in(repos, build_variant)
+@cli.command()
+def generate_test_membership_map_file_for_ci():
+ """
+ Generate a file to cache test membership data for CI.
+
+ This command should only be used in CI. The task generator runs many iterations of this script
+ for many build variants. The bottleneck is that creating the test membership file takes a long time.
+ Instead, we can cache this data & reuse it in CI for a significant speedup.
+
+ Run this command in CI before running the burn in task generator.
+ """
+ _configure_logging(False)
+ buildscripts.resmokelib.parser.set_run_options()
+
+ LOGGER.info("Generating burn_in test membership mapping file.")
+ test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS)
+ with open(BURN_IN_TEST_MEMBERSHIP_FILE, "w") as file:
+ json.dump(test_membership, file)
+ LOGGER.info(
+ f"Finished writing burn_in test membership mapping to {BURN_IN_TEST_MEMBERSHIP_FILE}")
+
+
if __name__ == "__main__":
- main() # pylint: disable=no-value-for-parameter
+ cli()
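
A minimal sketch of the cache round trip relied on above; the file name comes from BURN_IN_TEST_MEMBERSHIP_FILE, while the mapping contents (test file to suite names) are assumed here purely for illustration:

```python
import collections
import json

# Illustrative data; the real map is produced by create_test_membership_map().
membership = {"jstests/core/my_test_file.js": ["core", "core_txns"]}

with open("burn_in_test_membership_map_file_for_ci.json", "w") as file:
    json.dump(membership, file)

# create_executor_list() rehydrates the cache as a defaultdict(list), so a
# lookup for a key that is absent from the cache still yields an empty list.
with open("burn_in_test_membership_map_file_for_ci.json") as file:
    cached = collections.defaultdict(list, json.load(file))

print(cached["jstests/core/my_test_file.js"])     # ['core', 'core_txns']
print(cached["jstests/core/some_other_test.js"])  # []
```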
diff --git a/buildscripts/ciconfig/evergreen.py b/buildscripts/ciconfig/evergreen.py
index 6c52a5f55d4a6..a3a028caabf18 100644
--- a/buildscripts/ciconfig/evergreen.py
+++ b/buildscripts/ciconfig/evergreen.py
@@ -7,6 +7,10 @@
import datetime
import distutils.spawn
+import os
+import subprocess
+import sys
+import time
from typing import Set, List, Optional
import yaml
@@ -21,17 +25,22 @@ def parse_evergreen_file(path, evergreen_binary="evergreen"):
"""Read an Evergreen file and return EvergreenProjectConfig instance."""
if evergreen_binary:
if not distutils.spawn.find_executable(evergreen_binary):
- raise EnvironmentError(
- "Executable '{}' does not exist or is not in the PATH.".format(evergreen_binary))
+ default_evergreen_location = os.path.expanduser(os.path.join("~", "evergreen"))
+ if os.path.exists(default_evergreen_location):
+ evergreen_binary = default_evergreen_location
+ elif os.path.exists(f"{default_evergreen_location}.exe"):
+ evergreen_binary = f"{default_evergreen_location}.exe"
+ else:
+ raise EnvironmentError(
+ "Executable '{}' does not exist or is not in the PATH.".format(
+ evergreen_binary))
# Call 'evergreen evaluate path' to pre-process the project configuration file.
- cmd = runcommand.RunCommand(evergreen_binary)
- cmd.add("evaluate")
- cmd.add_file(path)
- error_code, output = cmd.execute()
- if error_code:
- raise RuntimeError("Unable to evaluate {}: {}".format(path, output))
- config = yaml.safe_load(output)
+ cmd = [evergreen_binary, "evaluate", path]
+ result = subprocess.run(cmd, capture_output=True, text=True)
+ if result.returncode:
+ raise RuntimeError("Unable to evaluate {}: {}".format(path, result.stdout))
+ config = yaml.safe_load(result.stdout)
else:
with open(path, "r") as fstream:
config = yaml.safe_load(fstream)
@@ -59,6 +68,7 @@ def __init__(self, conf):
self.distro_names = set()
for variant in self.variants:
self.distro_names.update(variant.distro_names)
+ self.functions = self._conf["functions"]
@property
def task_names(self) -> List[str]:
@@ -320,7 +330,7 @@ def task_names(self):
def is_required_variant(self) -> bool:
"""Return True if the variant is a required variant."""
- return self.display_name.startswith("! ")
+ return self.display_name.startswith("!")
def get_task(self, task_name):
"""Return the task with the given name as an instance of VariantTask.
diff --git a/buildscripts/clang_format.py b/buildscripts/clang_format.py
index ac768880b9f62..974bb3fc4e22d 100755
--- a/buildscripts/clang_format.py
+++ b/buildscripts/clang_format.py
@@ -264,7 +264,8 @@ def is_interesting_file(file_name):
"""Return true if this file should be checked."""
return (file_name.startswith("jstests")
or file_name.startswith("src") and not file_name.startswith("src/third_party/")
- and not file_name.startswith("src/mongo/gotools/")) and FILES_RE.search(file_name)
+ and not file_name.startswith("src/mongo/gotools/")
+ and not file_name.startswith("src/streams/third_party")) and FILES_RE.search(file_name)
def get_list_from_lines(lines):
diff --git a/buildscripts/clang_tidy.py b/buildscripts/clang_tidy.py
index 4b7c021f8b237..417885785ddfd 100755
--- a/buildscripts/clang_tidy.py
+++ b/buildscripts/clang_tidy.py
@@ -186,6 +186,11 @@ def main():
# A few special cases of files to ignore
if not file_doc["file"].startswith("src/mongo/"):
continue
+
+ # Don't run clang_tidy on the streams/third_party code.
+ if file_doc["file"].startswith("src/mongo/db/modules/enterprise/src/streams/third_party"):
+ continue
+
# TODO SERVER-49884 Remove this when we no longer check in generated Bison.
if file_doc["file"].endswith("/parser_gen.cpp"):
continue
diff --git a/buildscripts/config_diff.py b/buildscripts/config_diff.py
index f82522cd21a2b..2f4ad8eef0498 100755
--- a/buildscripts/config_diff.py
+++ b/buildscripts/config_diff.py
@@ -3,6 +3,8 @@
The comparison is computed by scanning though `base_version_dirs` and `incremented_version_dirs` looking for all configs and setParameters in each tree.
It then compares these looking for additions, removals, and deltas. Finally it outputs a summary to the console.
+
+This comparison does not currently support nested properties, as it only does a simple string comparison on key:property pairs - see build_diff_fn as a means of extending the comparison capability in the future.
"""
import argparse
@@ -95,7 +97,7 @@ def _compare_and_partition(self, yaml_props: dict, yaml_file_name: str) -> None:
# present in the base version properties, but not in the incremented version properties,
# which means they were removed in the incremented version
in_both_prop = self.properties_diff.removed.pop(compare_key)
- changed_properties = self.calc_diff(yaml_val, in_both_prop)
+ changed_properties = self.calc_diff(in_both_prop, yaml_val)
if len(changed_properties) > 0:
self.properties_diff.modified[compare_key] = changed_properties
@@ -248,7 +250,7 @@ def test_yaml_obj_filters_comparison_types_correctly(self):
short_name: networkMessageCompressors
default: 'snappy,zstd,zlib'
"""
- yaml_obj = yaml.load(document)
+ yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
fixture = BuildBasePropertiesForComparisonHandler(ComparisonType.SERVER_PARAMETERS)
fixture.handle(yaml_obj, filename)
@@ -271,7 +273,7 @@ def test_empty_yaml_obj_does_nothing(self):
cpp_namespace: "mongo"
"""
- yaml_obj = yaml.load(document)
+ yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
fixture = BuildBasePropertiesForComparisonHandler(ComparisonType.SERVER_PARAMETERS)
fixture.handle(yaml_obj, filename)
@@ -321,7 +323,7 @@ def test_yaml_obj_filtered_correctly(self):
default: 'zlib'
"""
- inc_yaml_obj = yaml.load(document)
+ inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {},
self.config_diff_function)
@@ -367,7 +369,7 @@ def test_added_works_correctly(self):
default: 'snappy,zstd,zlib'
"""
- inc_yaml_obj = yaml.load(document)
+ inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {},
self.config_diff_function)
@@ -400,7 +402,7 @@ def test_removed_works_correctly(self):
def get_base_data():
return {("ok", "test.yaml"): {"yes": "no"}, ("also_ok", "blah.yaml"): {"no": "yes"}}
- inc_yaml_obj = yaml.load(document)
+ inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS,
get_base_data(),
@@ -430,7 +432,7 @@ def get_base_data():
self.assertTrue(len(properties_diffs.added) == 0)
self.assertTrue(len(properties_diffs.modified) == 0)
- def test_modified_works_correctly(self):
+ def test_empty_modified_works_correctly(self):
filename = "test.yaml"
document = """
server_parameters:
@@ -465,8 +467,7 @@ def test_modified_works_correctly(self):
short_name: networkMessageCompressors
default: 'snappy,zstd,zlib'
"""
-
- inc_yaml_obj = yaml.load(document)
+ inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader)
inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {},
build_diff_fn(['default']))
@@ -488,6 +489,184 @@ def test_modified_works_correctly(self):
self.assertTrue(len(properties_diffs.removed) == 0)
self.assertTrue(len(properties_diffs.modified) == 0)
+ def test_not_modified_between_yamls_reports_correctly(self):
+ filename = "test.yaml"
+ document = """
+ server_parameters:
+ testOptions:
+ description: "Cluster server parameter for change stream options"
+ set_at: cluster
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ testParameter:
+ description: "Some parameter"
+ set_at: cluster
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ configs:
+ "asdf":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: String
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+
+ "zxcv":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: String
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+ """
+
+ document_inc = document
+
+ document_yaml = yaml.load(document, Loader=yaml.FullLoader)
+ document_inc_yaml = yaml.load(document_inc, Loader=yaml.FullLoader)
+
+ diff_fn = build_diff_fn(_COMPARE_FIELDS_CONFIGS)
+
+ config_base_properties_handler = BuildBasePropertiesForComparisonHandler(
+ ComparisonType.CONFIGS)
+ config_base_properties_handler.handle(document_yaml, filename)
+
+ config_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler(
+ ComparisonType.CONFIGS, config_base_properties_handler.properties, diff_fn)
+ config_inc_properties_handler.handle(document_inc_yaml, filename)
+
+ property_diff = config_inc_properties_handler.properties_diff
+
+ self.assertEqual(0, len(property_diff.modified))
+
+ diff_fn = build_diff_fn(_COMPARE_FIELDS_SERVER_PARAMETERS)
+
+ sp_base_properties_handler = BuildBasePropertiesForComparisonHandler(
+ ComparisonType.SERVER_PARAMETERS)
+ sp_base_properties_handler.handle(document_yaml, filename)
+
+ sp_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler(
+ ComparisonType.SERVER_PARAMETERS, sp_base_properties_handler.properties, diff_fn)
+ sp_inc_properties_handler.handle(document_inc_yaml, filename)
+
+ property_diff = sp_inc_properties_handler.properties_diff
+
+ self.assertEqual(0, len(property_diff.modified))
+
+ def test_modified_between_yamls_reports_correctly(self):
+ filename = "test.yaml"
+ document = """
+ server_parameters:
+ testOptions:
+ description: "Cluster server parameter for change stream options"
+ set_at: cluster
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ testParameter:
+ description: "Some parameter"
+ set_at: cluster
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ configs:
+ "asdf":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: String
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+
+ "zxcv":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: String
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+ """
+
+ document_inc = """
+ server_parameters:
+ testOptions:
+ description: "Cluster server parameter for change stream options"
+ set_at: runtime
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ testParameter:
+ description: "Some parameter"
+ set_at: cluster
+ cpp_class:
+ name: ChangeStreamOptionsParameter
+ override_set: true
+ override_validate: true
+
+ configs:
+ "asdf":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: int
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+
+ "zxcv":
+ description: 'Comma-separated list of compressors to use for network messages'
+ source: [ cli, ini, yaml ]
+ arg_vartype: String
+ short_name: networkMessageCompressors
+ default: 'snappy,zstd,zlib'
+ """
+
+ document_yaml = yaml.load(document, Loader=yaml.FullLoader)
+ document_inc_yaml = yaml.load(document_inc, Loader=yaml.FullLoader)
+
+ diff_fn = build_diff_fn(_COMPARE_FIELDS_CONFIGS)
+
+ config_base_properties_handler = BuildBasePropertiesForComparisonHandler(
+ ComparisonType.CONFIGS)
+ config_base_properties_handler.handle(document_yaml, filename)
+
+ config_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler(
+ ComparisonType.CONFIGS, config_base_properties_handler.properties, diff_fn)
+ config_inc_properties_handler.handle(document_inc_yaml, filename)
+
+ property_diff = config_inc_properties_handler.properties_diff
+
+ self.assertEqual(
+ property_diff.modified.get(("asdf", filename)).get("arg_vartype").base, "String")
+ self.assertEqual(
+ property_diff.modified.get(("asdf", filename)).get("arg_vartype").inc, "int")
+ self.assertIsNone(property_diff.modified.get(("zxcv", filename)))
+
+ diff_fn = build_diff_fn(_COMPARE_FIELDS_SERVER_PARAMETERS)
+
+ sp_base_properties_handler = BuildBasePropertiesForComparisonHandler(
+ ComparisonType.SERVER_PARAMETERS)
+ sp_base_properties_handler.handle(document_yaml, filename)
+
+ sp_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler(
+ ComparisonType.SERVER_PARAMETERS, sp_base_properties_handler.properties, diff_fn)
+ sp_inc_properties_handler.handle(document_inc_yaml, filename)
+
+ property_diff = sp_inc_properties_handler.properties_diff
+
+ self.assertEqual(
+ property_diff.modified.get(("testOptions", filename)).get("set_at").base, "cluster")
+ self.assertEqual(
+ property_diff.modified.get(("testOptions", filename)).get("set_at").inc, "runtime")
+ self.assertIsNone(property_diff.modified.get(("testParameter", filename)))
+
class TestPropertiesDiffFunction(unittest.TestCase):
def test_empty_returns_empty(self):
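
As a rough illustration of the flat, field-by-field comparison described in the config_diff.py module docstring (this is not the script's actual build_diff_fn; the names are illustrative, and the base/inc pair simply mirrors the attributes the tests above read):

```python
from collections import namedtuple

# Holds the old and new value of a field that changed between versions.
Delta = namedtuple("Delta", ["base", "inc"])


def make_diff_fn(compare_fields):
    """Return a function that diffs two flat dicts on the listed fields only."""

    def diff(base_props: dict, inc_props: dict) -> dict:
        return {
            field: Delta(base_props.get(field), inc_props.get(field))
            for field in compare_fields
            if base_props.get(field) != inc_props.get(field)
        }

    return diff


diff_fn = make_diff_fn(["arg_vartype", "default"])
print(diff_fn({"arg_vartype": "String"}, {"arg_vartype": "int"}))
# {'arg_vartype': Delta(base='String', inc='int')}
```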
diff --git a/buildscripts/cost_model/mongod-inmemory.yaml b/buildscripts/cost_model/mongod-inmemory.yaml
index cb0789fb7e7cc..14e2dfdd0cbd3 100644
--- a/buildscripts/cost_model/mongod-inmemory.yaml
+++ b/buildscripts/cost_model/mongod-inmemory.yaml
@@ -7,6 +7,6 @@ systemLog:
logAppend: false
setParameter:
featureFlagCommonQueryFramework: true
- internalQueryFrameworkControl: "tryBonsai"
+ internalQueryFrameworkControl: "tryBonsaiExperimental"
internalMeasureQueryExecutionTimeInNanoseconds: true
enableTestCommands: 1
diff --git a/buildscripts/cost_model/mongod.yaml b/buildscripts/cost_model/mongod.yaml
index c60835883fda8..bc21abb30be8b 100644
--- a/buildscripts/cost_model/mongod.yaml
+++ b/buildscripts/cost_model/mongod.yaml
@@ -6,6 +6,6 @@ systemLog:
logAppend: false
setParameter:
featureFlagCommonQueryFramework: true
- internalQueryFrameworkControl: "tryBonsai"
+ internalQueryFrameworkControl: "tryBonsaiExperimental"
internalMeasureQueryExecutionTimeInNanoseconds: true
enableTestCommands: 1
diff --git a/buildscripts/debugsymb_mapper.py b/buildscripts/debugsymb_mapper.py
index d85ad64d9c9e0..f09418cac5f16 100644
--- a/buildscripts/debugsymb_mapper.py
+++ b/buildscripts/debugsymb_mapper.py
@@ -157,19 +157,20 @@ class Mapper:
default_web_service_base_url: str = "https://symbolizer-service.server-tig.prod.corp.mongodb.com"
default_cache_dir = os.path.join(os.getcwd(), 'build', 'symbols_cache')
- selected_binaries = ('mongos.debug', 'mongod.debug', 'mongo.debug')
+ selected_binaries = ('mongos', 'mongod', 'mongo')
default_client_credentials_scope = "servertig-symbolizer-fullaccess"
default_client_credentials_user_name = "client-user"
default_creds_file_path = os.path.join(os.getcwd(), '.symbolizer_credentials.json')
- def __init__(self, evg_version: str, evg_variant: str, client_id: str, client_secret: str,
- cache_dir: str = None, web_service_base_url: str = None,
+ def __init__(self, evg_version: str, evg_variant: str, is_san_variant: bool, client_id: str,
+ client_secret: str, cache_dir: str = None, web_service_base_url: str = None,
logger: logging.Logger = None):
"""
Initialize instance.
:param evg_version: Evergreen version ID.
:param evg_variant: Evergreen build variant name.
+ :param is_san_variant: Whether the build variant is a sanitizer build.
:param client_id: Client id for Okta Oauth.
:param client_secret: Secret key for Okta Oauth.
:param cache_dir: Full path to cache directory as a string.
@@ -178,6 +179,7 @@ def __init__(self, evg_version: str, evg_variant: str, client_id: str, client_se
"""
self.evg_version = evg_version
self.evg_variant = evg_variant
+ self.is_san_variant = is_san_variant
self.cache_dir = cache_dir or self.default_cache_dir
self.web_service_base_url = web_service_base_url or self.default_web_service_base_url
@@ -263,11 +265,13 @@ def setup_urls(self):
urlinfo = self.multiversion_setup.get_urls(self.evg_version, self.evg_variant)
- download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.tgz", None)
binaries_url = urlinfo.urls.get("Binaries", "")
-
- if not download_symbols_url:
- download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.zip", None)
+ if self.is_san_variant:
+ # Sanitizer builds are not stripped and contain debug symbols
+ download_symbols_url = binaries_url
+ else:
+ download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.tgz") or urlinfo.urls.get(
+ "mongo-debugsymbols.zip")
if not download_symbols_url:
self.logger.error("Couldn't find URL for debug symbols. Version: %s, URLs dict: %s",
@@ -319,23 +323,17 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]:
extractor = CmdOutputExtractor()
- debug_symbols_path = self.download(self.debug_symbols_url)
- debug_symbols_unpacked_path = self.unpack(debug_symbols_path)
-
binaries_path = self.download(self.url)
binaries_unpacked_path = self.unpack(binaries_path)
- # we need to analyze two directories: main binary folder inside debug-symbols and
+ # we need to analyze two directories: main binary folder and
# shared libraries folder inside binaries.
# main binary folder holds main binaries, like mongos, mongod, mongo ...
# shared libraries folder holds shared libraries, tons of them.
# some build variants do not contain shared libraries.
- debug_symbols_unpacked_path = os.path.join(debug_symbols_unpacked_path, 'dist-test')
binaries_unpacked_path = os.path.join(binaries_unpacked_path, 'dist-test')
- self.logger.info("INSIDE unpacked debug-symbols/dist-test: %s",
- os.listdir(debug_symbols_unpacked_path))
self.logger.info("INSIDE unpacked binaries/dist-test: %s",
os.listdir(binaries_unpacked_path))
@@ -352,19 +350,19 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]:
# start with main binary folder
for binary in self.selected_binaries:
- full_bin_path = os.path.join(debug_symbols_unpacked_path,
+ full_bin_path = os.path.join(binaries_unpacked_path,
self.path_options.main_binary_folder_name, binary)
if not os.path.exists(full_bin_path):
self.logger.error("Could not find binary at %s", full_bin_path)
- return
+ continue
build_id_output = extractor.get_build_id(full_bin_path)
if not build_id_output.build_id:
self.logger.error("Build ID couldn't be extracted. \nReadELF output %s",
build_id_output.cmd_output)
- return
+ continue
else:
self.logger.info("Extracted build ID: %s", build_id_output.build_id)
@@ -397,14 +395,14 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]:
if not os.path.exists(sofile_path):
self.logger.error("Could not find binary at %s", sofile_path)
- return
+ continue
build_id_output = extractor.get_build_id(sofile_path)
if not build_id_output.build_id:
self.logger.error("Build ID couldn't be extracted. \nReadELF out %s",
build_id_output.cmd_output)
- return
+ continue
else:
self.logger.info("Extracted build ID: %s", build_id_output.build_id)
@@ -426,6 +424,7 @@ def run(self):
# mappings is a generator, we iterate over to generate mappings on the go
for mapping in mappings:
+ self.logger.info("Creating mapping %s", mapping)
response = self.http_client.post('/'.join((self.web_service_base_url, 'add')),
json=mapping)
if response.status_code != 200:
@@ -440,11 +439,12 @@ def make_argument_parser(parser=None, **kwargs):
if parser is None:
parser = argparse.ArgumentParser(**kwargs)
- parser.add_argument('--version')
- parser.add_argument('--client-id')
- parser.add_argument('--client-secret')
- parser.add_argument('--variant')
- parser.add_argument('--web-service-base-url', default="")
+ parser.add_argument("--version")
+ parser.add_argument("--client-id")
+ parser.add_argument("--client-secret")
+ parser.add_argument("--variant")
+ parser.add_argument("--is-san-variant", action="store_true")
+ parser.add_argument("--web-service-base-url", default="")
return parser
@@ -452,7 +452,8 @@ def main(options):
"""Execute mapper here. Main entry point."""
mapper = Mapper(evg_version=options.version, evg_variant=options.variant,
- client_id=options.client_id, client_secret=options.client_secret,
+ is_san_variant=options.is_san_variant, client_id=options.client_id,
+ client_secret=options.client_secret,
web_service_base_url=options.web_service_base_url)
# when used as a context manager, mapper instance automatically cleans files/folders after finishing its job.
diff --git a/buildscripts/eslint.py b/buildscripts/eslint.py
index 58019284d9c80..6e74eac8e1b63 100755
--- a/buildscripts/eslint.py
+++ b/buildscripts/eslint.py
@@ -19,6 +19,7 @@
import tarfile
import tempfile
import threading
+import platform
from typing import Optional
import urllib.error
import urllib.parse
@@ -50,14 +51,17 @@
# Name of ESLint as a binary.
ESLINT_PROGNAME = "eslint"
+# Arch of running system
+ARCH = platform.machine() if platform.machine() != "aarch64" else "arm64"
+
# URL location of our provided ESLint binaries.
ESLINT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \
- ESLINT_VERSION + "-linux.tar.gz"
+ ESLINT_VERSION + "-linux-" + ARCH + ".tar.gz"
ESLINT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \
ESLINT_VERSION + "-darwin.tar.gz"
# Path in the tarball to the ESLint binary.
-ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$platform-$arch")
+ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$operating_system-$arch")
LOGGER = structlog.get_logger(__name__)
@@ -76,15 +80,15 @@ def extract_eslint(tar_path, target_file):
tarfp.close()
-def get_eslint_from_cache(dest_file, platform, arch):
+def get_eslint_from_cache(dest_file, operating_system, arch):
"""Get ESLint binary from mongodb's cache."""
# Get URL
- if platform == "Linux":
+ if operating_system == "Linux":
url = ESLINT_HTTP_LINUX_CACHE
- elif platform == "Darwin":
+ elif operating_system == "Darwin":
url = ESLINT_HTTP_DARWIN_CACHE
else:
- raise ValueError('ESLint is not available as a binary for ' + platform)
+ raise ValueError('ESLint is not available as a binary for ' + operating_system)
dest_dir = tempfile.gettempdir()
temp_tar_file = os.path.join(dest_dir, "temp.tar.gz")
@@ -94,7 +98,8 @@ def get_eslint_from_cache(dest_file, platform, arch):
urllib.request.urlretrieve(url, temp_tar_file)
print("Extracting ESLint %s to %s" % (ESLINT_VERSION, dest_file))
- eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(platform=platform, arch=arch)
+ eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(operating_system=operating_system,
+ arch=arch)
extract_eslint(temp_tar_file, eslint_distfile)
shutil.move(eslint_distfile, dest_file)
@@ -108,7 +113,7 @@ def __init__(self, path, cache_dir):
# Initialize ESLint configuration information
if sys.platform.startswith("linux"):
- self.arch = "x86_64"
+ self.arch = ARCH
self.tar_path = None
elif sys.platform == "darwin":
self.arch = "x86_64"
diff --git a/buildscripts/eslint/README.md b/buildscripts/eslint/README.md
index ea0d7fc3ae7f3..bba2f46862d9b 100644
--- a/buildscripts/eslint/README.md
+++ b/buildscripts/eslint/README.md
@@ -21,10 +21,12 @@
"pkg": {
"scripts": [ "conf/**/*", "lib/**/*", "messages/**/*" ],
"targets": [ "linux-x64", "macos-x64" ]
+ # "targets": [ "linux-arm" ]
},
```
6. Run pkg command to make ESLint executables.
```
+ npm install
pkg .
```
7. Check that executables are working.
@@ -38,6 +40,10 @@
```
eslint-macos --help
```
+ or (if you are on arm)
+ ```
+ eslint --help
+ ```
(*) If executable fails to find some .js files there are [extra steps](#extra-steps)
required to be done before step 6.
@@ -48,19 +54,25 @@ Rename produced files.
```
mv eslint-linux eslint-Linux-x86_64
mv eslint-macos eslint-Darwin-x86_64
+# arm
+# mv eslint eslint-Linux-arm64
```
-Archive files.
+Archive files. (Note: no leading "v" in the version, e.g. 8.28.0, not v8.28.0.)
```
-tar -czvf eslint-${version}-linux.tar.gz eslint-Linux-x86_64
+tar -czvf eslint-${version}-linux-x86_64.tar.gz eslint-Linux-x86_64
tar -czvf eslint-${version}-darwin.tar.gz eslint-Darwin-x86_64
+# arm
+# tar -czvf eslint-${version}-linux-arm64.tar.gz eslint-Linux-arm64
```
### Upload archives to `boxes.10gen.com`
Archives should be available by the following links:
```
-https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux.tar.gz
+https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux-x86_64.tar.gz
https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-darwin.tar.gz
+# arm
+# https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux-arm64.tar.gz
```
Build team has an access to do that.
You can create a build ticket in Jira for them to do it
diff --git a/buildscripts/evergreen_activate_gen_tasks.py b/buildscripts/evergreen_activate_gen_tasks.py
index 5c20b285e6b1d..b757a146d8571 100755
--- a/buildscripts/evergreen_activate_gen_tasks.py
+++ b/buildscripts/evergreen_activate_gen_tasks.py
@@ -26,6 +26,7 @@
EVG_CONFIG_FILE = "./.evergreen.yml"
BURN_IN_TAGS = "burn_in_tags"
BURN_IN_TESTS = "burn_in_tests"
+BURN_IN_VARIANT_SUFFIX = "generated-by-burn-in-tags"
class EvgExpansions(BaseModel):
@@ -35,13 +36,11 @@ class EvgExpansions(BaseModel):
build_id: ID of build being run.
version_id: ID of version being run.
task_name: Name of task creating the generated configuration.
- burn_in_tag_buildvariants: Buildvariants to run burn_in_tags on.
"""
build_id: str
version_id: str
task_name: str
- burn_in_tag_buildvariants: Optional[str] = None
@classmethod
def from_yaml_file(cls, path: str) -> "EvgExpansions":
@@ -53,13 +52,6 @@ def task(self) -> str:
"""Get the task being generated."""
return remove_gen_suffix(self.task_name)
- @property
- def burn_in_tag_buildvariants_list(self) -> List[str]:
- """Get the list of burn_in_tags buildvariants."""
- if self.burn_in_tag_buildvariants is None:
- return []
- return self.burn_in_tag_buildvariants.split()
-
def activate_task(expansions: EvgExpansions, evg_api: EvergreenApi) -> None:
"""
@@ -70,16 +62,12 @@ def activate_task(expansions: EvgExpansions, evg_api: EvergreenApi) -> None:
"""
if expansions.task == BURN_IN_TAGS:
version = evg_api.version_by_id(expansions.version_id)
- for base_build_variant in expansions.burn_in_tag_buildvariants_list:
- build_variant = f"{base_build_variant}-required"
- try:
- build_id = version.build_variants_map[build_variant]
- except KeyError:
- LOGGER.warning(
- "It is likely nothing to burn_in, so burn_in_tags build variant"
- " was not generated. Skipping...", build_variant=build_variant)
- continue
-
+ burn_in_build_variants = [
+ variant for variant in version.build_variants_map.keys()
+ if variant.endswith(BURN_IN_VARIANT_SUFFIX)
+ ]
+ for build_variant in burn_in_build_variants:
+ build_id = version.build_variants_map[build_variant]
task_list = evg_api.tasks_by_build(build_id)
for task in task_list:
diff --git a/buildscripts/evergreen_gen_build_metrics_tasks.py b/buildscripts/evergreen_gen_build_metrics_tasks.py
index 5cba9c832ac1c..0358b64703046 100755
--- a/buildscripts/evergreen_gen_build_metrics_tasks.py
+++ b/buildscripts/evergreen_gen_build_metrics_tasks.py
@@ -134,7 +134,6 @@ def create_task_group(target_platform, tasks):
FunctionCall("cleanup environment"),
FunctionCall("set up venv"),
FunctionCall("upload pip requirements"),
- FunctionCall("get all modified patch files"),
FunctionCall("f_expansions_write"),
FunctionCall("configure evergreen api credentials"),
FunctionCall("get buildnumber"),
@@ -197,7 +196,7 @@ def create_task_group(target_platform, tasks):
for link_model, tasks in tasks['linux_arm64_tasks'].items():
variant.add_task_group(
create_task_group(f'linux_arm64_{link_model}', tasks),
- ['amazon2022-arm64-large'])
+ ['amazon2023-arm64-large'])
project = ShrubProject({variant})
with open('build_metrics_task_gen.json', 'w') as fout:
diff --git a/buildscripts/evergreen_task_timeout.py b/buildscripts/evergreen_task_timeout.py
index 6c51cf79b7b1b..60ce3f8f7fc05 100755
--- a/buildscripts/evergreen_task_timeout.py
+++ b/buildscripts/evergreen_task_timeout.py
@@ -144,16 +144,6 @@ def lookup_idle_override(self, build_variant: str, task_name: str) -> Optional[t
return None
-def _is_required_build_variant(build_variant: str) -> bool:
- """
- Determine if the given build variants is a required build variant.
-
- :param build_variant: Name of build variant to check.
- :return: True if the given build variant is required.
- """
- return build_variant.endswith("-required")
-
-
def output_timeout(exec_timeout: timedelta, idle_timeout: Optional[timedelta],
output_file: Optional[str]) -> None:
"""
@@ -163,9 +153,6 @@ def output_timeout(exec_timeout: timedelta, idle_timeout: Optional[timedelta],
:param idle_timeout: Idle timeout to output.
:param output_file: Location of output file to write.
"""
- # the math library is triggering this error in this function for some
- # reason
- # pylint: disable=c-extension-no-member
output = {
"exec_timeout_secs": math.ceil(exec_timeout.total_seconds()),
}
@@ -226,7 +213,7 @@ def determine_exec_timeout(self, task_name: str, variant: str,
LOGGER.info("Overriding configured timeout", exec_timeout_secs=override.total_seconds())
determined_timeout = override
- elif _is_required_build_variant(
+ elif self._is_required_build_variant(
variant) and determined_timeout > DEFAULT_REQUIRED_BUILD_TIMEOUT:
LOGGER.info("Overriding required-builder timeout",
exec_timeout_secs=DEFAULT_REQUIRED_BUILD_TIMEOUT.total_seconds())
@@ -314,6 +301,17 @@ def is_build_variant_asan(self, build_variant: str) -> bool:
bv = self.evg_project_config.get_variant(build_variant)
return bv.is_asan_build()
+ def _is_required_build_variant(self, build_variant: str) -> bool:
+ """
+ Determine if the given build variant is a required build variant.
+
+ Required variants are identified by a "!" marker in the variant's display name in the Evergreen project config.
+ :param build_variant: Name of build variant to check.
+ :return: True if the given build variant is required.
+ """
+ bv = self.evg_project_config.get_variant(build_variant)
+ return "!" in bv.display_name
+
def determine_timeouts(self, cli_idle_timeout: Optional[timedelta],
cli_exec_timeout: Optional[timedelta], outfile: Optional[str],
project: str, task: str, variant: str, evg_alias: str, suite_name: str,
@@ -357,6 +355,8 @@ def main():
help="Evergreen project task is being executed on.")
parser.add_argument("--evg-alias", dest="evg_alias", required=True,
help="Evergreen alias used to trigger build.")
+ parser.add_argument("--test-flags", dest="test_flags",
+ help="Test flags that are used for `resmoke.py run` command call.")
parser.add_argument("--timeout", dest="timeout", type=int, help="Timeout to use (in sec).")
parser.add_argument("--exec-timeout", dest="exec_timeout", type=int,
help="Exec timeout to use (in sec).")
@@ -393,7 +393,9 @@ def dependencies(binder: inject.Binder) -> None:
parse_evergreen_file(os.path.expanduser(options.evg_project_config)))
binder.bind(
ResmokeProxyService,
- ResmokeProxyService(run_options=f"--installDir={shlex.quote(options.install_dir)}"))
+ ResmokeProxyService(
+ run_options=f"--installDir={shlex.quote(options.install_dir)} {options.test_flags}")
+ )
inject.configure(dependencies)
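For illustration, a minimal standalone sketch of the behavior of the new required-variant check above: the decision keys on a "!" marker in the variant's display name from the Evergreen project config. The variant names, display names, and stub classes below are hypothetical.

# Sketch only: required builders are assumed to carry "!" in their Evergreen display name.
class FakeVariant:
    def __init__(self, display_name):
        self.display_name = display_name


class FakeProjectConfig:
    def __init__(self, variants):
        self.variants = variants

    def get_variant(self, name):
        return self.variants[name]


def is_required_build_variant(evg_project_config, build_variant):
    bv = evg_project_config.get_variant(build_variant)
    return "!" in bv.display_name


config = FakeProjectConfig({
    "linux-x86-dynamic-compile-required": FakeVariant("! Linux x86 Shared Library Compile"),
    "enterprise-macos": FakeVariant("Enterprise macOS"),
})
assert is_required_build_variant(config, "linux-x86-dynamic-compile-required")
assert not is_required_build_variant(config, "enterprise-macos")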
diff --git a/buildscripts/gdb/mongo.py b/buildscripts/gdb/mongo.py
index 317c52acf5cac..fe1a5d443d06a 100644
--- a/buildscripts/gdb/mongo.py
+++ b/buildscripts/gdb/mongo.py
@@ -14,7 +14,7 @@
if not gdb:
sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent))
- from buildscripts.gdb.mongo_printers import absl_get_nodes, get_unique_ptr
+ from buildscripts.gdb.mongo_printers import absl_get_nodes, get_unique_ptr, get_unique_ptr_bytes
def detect_toolchain(progspace):
@@ -159,6 +159,36 @@ def get_thread_id():
raise ValueError("Failed to find thread id in {}".format(thread_info))
+MAIN_GLOBAL_BLOCK = None
+
+
+def lookup_type(gdb_type_str: str) -> gdb.Type:
+ """
+ Try to find the type object from string.
+
+ GDB documents that it searches the global blocks, but this appears not to be the
+ case, or at least it does not search all global blocks; sometimes the global block
+ has to be looked up explicitly, which is done here via the symbol table of main().
+ """
+ global MAIN_GLOBAL_BLOCK # pylint: disable=global-statement
+
+ exceptions = []
+ try:
+ return gdb.lookup_type(gdb_type_str)
+ except Exception as exc:
+ exceptions.append(exc)
+
+ if MAIN_GLOBAL_BLOCK is None:
+ MAIN_GLOBAL_BLOCK = gdb.lookup_symbol("main")[0].symtab.global_block()
+
+ try:
+ return gdb.lookup_type(gdb_type_str, MAIN_GLOBAL_BLOCK)
+ except Exception as exc:
+ exceptions.append(exc)
+
+ raise gdb.error("Failed to get type, tried:\n%s" % '\n'.join([str(exc) for exc in exceptions]))
+
+
def get_current_thread_name():
"""Return the name of the current GDB thread."""
fallback_name = '"%s"' % (gdb.selected_thread().name or '')
@@ -217,7 +247,7 @@ def get_wt_session(recovery_unit, recovery_unit_impl_type):
if not wt_session_handle.dereference().address:
return None
wt_session = wt_session_handle.dereference().cast(
- gdb.lookup_type("mongo::WiredTigerSession"))["_session"]
+ lookup_type("mongo::WiredTigerSession"))["_session"]
return wt_session
@@ -230,13 +260,13 @@ def get_decorations(obj):
TODO: De-duplicate the logic between here and DecorablePrinter. This code was copied from there.
"""
type_name = str(obj.type).replace("class", "").replace(" ", "")
- decorable = obj.cast(gdb.lookup_type("mongo::Decorable<{}>".format(type_name)))
+ decorable = obj.cast(lookup_type("mongo::Decorable<{}>".format(type_name)))
decl_vector = decorable["_decorations"]["_registry"]["_decorationInfo"]
start = decl_vector["_M_impl"]["_M_start"]
finish = decl_vector["_M_impl"]["_M_finish"]
decorable_t = decorable.type.template_argument(0)
- decinfo_t = gdb.lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format(
+ decinfo_t = lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format(
str(decorable_t).replace("class", "").strip()))
count = int((int(finish) - int(start)) / decinfo_t.sizeof)
@@ -249,13 +279,13 @@ def get_decorations(obj):
type_name = type_name[0:type_name.rindex(">")]
type_name = type_name[type_name.index("constructAt<"):].replace("constructAt<", "")
# get_unique_ptr should be loaded from 'mongo_printers.py'.
- decoration_data = get_unique_ptr(decorable["_decorations"]["_decorationData"])
+ decoration_data = get_unique_ptr_bytes(decorable["_decorations"]["_decorationData"])
if type_name.endswith('*'):
type_name = type_name[0:len(type_name) - 1]
type_name = type_name.rstrip()
try:
- type_t = gdb.lookup_type(type_name)
+ type_t = lookup_type(type_name)
obj = decoration_data[dindex].cast(type_t)
yield (type_name, obj)
except Exception as err:
@@ -341,14 +371,14 @@ class GetMongoDecoration(gdb.Command):
"""
Search for a decoration on an object by typename and print it e.g.
- (gdb) mongo-decoration opCtx ReadConcernArgs
+ (gdb) mongodb-decoration opCtx ReadConcernArgs
would print out a decoration on opCtx whose type name contains the string "ReadConcernArgs".
"""
def __init__(self):
"""Initialize GetMongoDecoration."""
- RegisterMongoCommand.register(self, "mongo-decoration", gdb.COMMAND_DATA)
+ RegisterMongoCommand.register(self, "mongodb-decoration", gdb.COMMAND_DATA)
def invoke(self, args, _from_tty):
"""Invoke GetMongoDecoration."""
@@ -501,7 +531,7 @@ def dump_session(session):
val = get_boost_optional(txn_part_observable_state['txnResourceStash'])
if val:
locker_addr = get_unique_ptr(val["_locker"])
- locker_obj = locker_addr.dereference().cast(gdb.lookup_type("mongo::LockerImpl"))
+ locker_obj = locker_addr.dereference().cast(lookup_type("mongo::LockerImpl"))
print('txnResourceStash._locker', "@", locker_addr)
print("txnResourceStash._locker._id", "=", locker_obj["_id"])
else:
@@ -584,8 +614,6 @@ def dump_mongod_locks():
try:
# Call into mongod, and dump the state of lock manager
# Note that output will go to mongod's standard output, not the debugger output window
- # Do not call mongo::getGlobalLockManager() due to the compiler optimizing this function in a very weird way
- # See SERVER-72816 for more context
gdb.execute(
"call mongo::LockManager::get((mongo::ServiceContext*) mongo::getGlobalServiceContext())->dump()",
from_tty=False, to_string=False)
@@ -647,7 +675,7 @@ def dump_recovery_units(recovery_unit_impl_type):
recovery_unit_handle = get_unique_ptr(operation_context["_recoveryUnit"])
# By default, cast the recovery unit as "mongo::WiredTigerRecoveryUnit"
recovery_unit = recovery_unit_handle.dereference().cast(
- gdb.lookup_type(recovery_unit_impl_type))
+ lookup_type(recovery_unit_impl_type))
output_doc["recoveryUnit"] = hex(recovery_unit_handle) if recovery_unit else "0x0"
wt_session = get_wt_session(recovery_unit, recovery_unit_impl_type)
@@ -692,7 +720,7 @@ def dump_session(session, recovery_unit_impl_type):
recovery_unit_handle = get_unique_ptr(txn_resource_stash["_recoveryUnit"])
# By default, cast the recovery unit as "mongo::WiredTigerRecoveryUnit"
recovery_unit = recovery_unit_handle.dereference().cast(
- gdb.lookup_type(recovery_unit_impl_type))
+ lookup_type(recovery_unit_impl_type))
output_doc["recoveryUnit"] = hex(recovery_unit_handle) if recovery_unit else "0x0"
wt_session = get_wt_session(recovery_unit, recovery_unit_impl_type)
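A usage sketch of the lookup_type() helper added above: call sites cast gdb.Values through it so that the main()-global-block fallback applies when a plain gdb.lookup_type() fails. This only runs inside a GDB session with the server binary loaded, and the helper name below is hypothetical.

import gdb  # only usable inside a GDB session

from buildscripts.gdb.mongo import lookup_type


def cast_to_locker(locker_addr):
    """Illustrative helper: cast a raw pointer gdb.Value the way dump_session() does above."""
    return locker_addr.dereference().cast(lookup_type("mongo::LockerImpl"))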
diff --git a/buildscripts/gdb/mongo_lock.py b/buildscripts/gdb/mongo_lock.py
index d05acaf67796f..38dcaa8b0f936 100644
--- a/buildscripts/gdb/mongo_lock.py
+++ b/buildscripts/gdb/mongo_lock.py
@@ -9,7 +9,7 @@
if not gdb:
sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent))
- from buildscripts.gdb.mongo import get_current_thread_name, get_thread_id, RegisterMongoCommand
+ from buildscripts.gdb.mongo import get_current_thread_name, get_thread_id, lookup_type, RegisterMongoCommand
if sys.version_info[0] < 3:
raise gdb.GdbError(
@@ -323,10 +323,8 @@ def find_lock_manager_holders(graph, thread_dict, show):
(_, lock_waiter_lwpid, _) = gdb.selected_thread().ptid
lock_waiter = thread_dict[lock_waiter_lwpid]
- locker_ptr_type = gdb.lookup_type("mongo::LockerImpl").pointer()
+ locker_ptr_type = lookup_type("mongo::LockerImpl").pointer()
- # Do not call mongo::getGlobalLockManager() due to the compiler optimizing this function in a very weird way
- # See SERVER-72816 for more context
lock_head = gdb.parse_and_eval(
"mongo::LockManager::get((mongo::ServiceContext*) mongo::getGlobalServiceContext())->_getBucket(resId)->findOrInsert(resId)"
)
diff --git a/buildscripts/gdb/mongo_printers.py b/buildscripts/gdb/mongo_printers.py
index a38c677d9e0cd..0fdb262b15b3a 100644
--- a/buildscripts/gdb/mongo_printers.py
+++ b/buildscripts/gdb/mongo_printers.py
@@ -9,9 +9,13 @@
import gdb
import gdb.printing
+ROOT_PATH = str(Path(os.path.abspath(__file__)).parent.parent.parent)
+if ROOT_PATH not in sys.path:
+ sys.path.insert(0, ROOT_PATH)
+from src.third_party.immer.dist.tools.gdb_pretty_printers.printers import ListIter as ImmerListIter # pylint: disable=wrong-import-position
+
if not gdb:
- sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent))
- from buildscripts.gdb.mongo import get_boost_optional
+ from buildscripts.gdb.mongo import get_boost_optional, lookup_type
from buildscripts.gdb.optimizer_printers import register_abt_printers
try:
@@ -29,9 +33,20 @@
"MongoDB gdb extensions only support Python 3. Your GDB was compiled against Python 2")
+def get_unique_ptr_bytes(obj):
+ """Read the value of a libstdc++ std::unique_ptr.
+
+ Returns a gdb.Value whose type resolves to `unsigned char*`. The caller must take care to
+ cast the returned value themselves. This function is particularly useful for
+ mongo::Decorable<> types, which store their decorations as a slab of memory behind a
+ std::unique_ptr. In all other cases get_unique_ptr() should be preferred.
+ """
+ return obj.cast(gdb.lookup_type('std::_Head_base<0, unsigned char*, false>'))['_M_head_impl']
+
+
def get_unique_ptr(obj):
"""Read the value of a libstdc++ std::unique_ptr."""
- return obj.cast(gdb.lookup_type('std::_Head_base<0, unsigned char*, false>'))['_M_head_impl']
+ return get_unique_ptr_bytes(obj).cast(obj.type.template_argument(0).pointer())
###################################################################################################
@@ -127,7 +142,7 @@ class BSONObjPrinter(object):
def __init__(self, val):
"""Initialize BSONObjPrinter."""
self.val = val
- self.ptr = self.val['_objdata'].cast(gdb.lookup_type('void').pointer())
+ self.ptr = self.val['_objdata'].cast(lookup_type('void').pointer())
self.is_valid = False
# Handle the endianness of the BSON object size, which is represented as a 32-bit integer
@@ -200,11 +215,6 @@ def __init__(self, val):
"""Initialize OplogEntryPrinter."""
self.val = val
- @staticmethod
- def display_hint():
- """Display hint."""
- return 'string'
-
def to_string(self):
"""Return OplogEntry for printing."""
optime = self.val['_entry']['_opTimeBase']
@@ -212,7 +222,7 @@ def to_string(self):
return "OplogEntry(%s, %s, %s, %s)" % (
str(self.val['_entry']['_durableReplOperation']['_opType']).split('::')[-1],
str(self.val['_entry']['_commandType']).split('::')[-1],
- self.val['_entry']['_durableReplOperation']['_nss']['_ns'], optime_str)
+ self.val['_entry']['_durableReplOperation']['_nss'], optime_str)
class UUIDPrinter(object):
@@ -248,7 +258,7 @@ def display_hint():
def to_string(self):
"""Return OID for printing."""
- raw_bytes = [int(self.val['_data'][i]) for i in range(12)]
+ raw_bytes = [int(self.val['_data'][i]) for i in range(OBJECT_ID_WIDTH)]
oid_hex_bytes = [hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes]
return "ObjectID('%s')" % "".join(oid_hex_bytes)
@@ -286,7 +296,7 @@ def to_string(self):
holder = holder_ptr.dereference()
str_len = int(holder["_capacity"])
# Start of data is immediately after pointer for holder
- start_ptr = (holder_ptr + 1).dereference().cast(gdb.lookup_type("char")).address
+ start_ptr = (holder_ptr + 1).dereference().cast(lookup_type("char")).address
raw_bytes = [int(start_ptr[i]) for i in range(0, str_len)]
hex_bytes = [hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes]
return "RecordId big string %d hex bytes @ %s: %s" % (str_len, holder_ptr + 1,
@@ -295,6 +305,55 @@ def to_string(self):
return "unknown RecordId format: %d" % rid_format
+TENANT_ID_MASK = 0x80
+OBJECT_ID_WIDTH = 12
+
+
+def extract_tenant_id(data):
+ raw_bytes = [int(data[i]) for i in range(1, OBJECT_ID_WIDTH + 1)]
+ return "".join([hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes])
+
+
+class DatabaseNamePrinter(object):
+ """Pretty-printer for mongo::DatabaseName."""
+
+ def __init__(self, val):
+ """Initialize DatabaseNamePrinter."""
+ self.val = val
+
+ @staticmethod
+ def display_hint():
+ """Display hint."""
+ return 'string'
+
+ def to_string(self):
+ """Return string representation of DatabaseName."""
+ data = self.val['_data']['_M_dataplus']['_M_p']
+ if data[0] & TENANT_ID_MASK:
+ return f"{extract_tenant_id(data)}_{(data + OBJECT_ID_WIDTH + 1).string()}"
+ return (data + 1).string()
+
+
+class NamespaceStringPrinter(object):
+ """Pretty-printer for mongo::NamespaceString."""
+
+ def __init__(self, val):
+ """Initialize NamespaceStringPrinter."""
+ self.val = val
+
+ @staticmethod
+ def display_hint():
+ """Display hint."""
+ return 'string'
+
+ def to_string(self):
+ """Return string representation of NamespaceString."""
+ data = self.val['_data']['_M_dataplus']['_M_p']
+ if data[0] & TENANT_ID_MASK:
+ return f"{extract_tenant_id(data)}_{(data + OBJECT_ID_WIDTH + 1).string()}"
+ return (data + 1).string()
+
+
class DecorablePrinter(object):
"""Pretty-printer for mongo::Decorable<>."""
@@ -307,7 +366,7 @@ def __init__(self, val):
self.start = decl_vector["_M_impl"]["_M_start"]
finish = decl_vector["_M_impl"]["_M_finish"]
decorable_t = val.type.template_argument(0)
- decinfo_t = gdb.lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format(
+ decinfo_t = lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format(
str(decorable_t).replace("class", "").strip()))
self.count = int((int(finish) - int(self.start)) / decinfo_t.sizeof)
@@ -322,7 +381,7 @@ def to_string(self):
def children(self):
"""Children."""
- decoration_data = get_unique_ptr(self.val["_decorations"]["_decorationData"])
+ decoration_data = get_unique_ptr_bytes(self.val["_decorations"]["_decorationData"])
for index in range(self.count):
descriptor = self.start[index]
@@ -342,7 +401,7 @@ def children(self):
type_name = type_name.rstrip()
# Cast the raw char[] into the actual object that is stored there.
- type_t = gdb.lookup_type(type_name)
+ type_t = lookup_type(type_name)
obj = decoration_data[dindex].cast(type_t)
yield ('key', "%d:%s:%s" % (index, obj.address, type_name))
@@ -589,6 +648,61 @@ def children(self):
yield ('value', kvp['value'])
+class ImmutableMapIter(ImmerListIter):
+ def __init__(self, val):
+ super().__init__(val)
+ self.max = (1 << 64) - 1
+ self.pair = None
+ self.curr = (None, self.max, self.max)
+
+ def __next__(self):
+ if self.pair:
+ result = ('value', self.pair['second'])
+ self.pair = None
+ self.i += 1
+ return result
+ if self.i == self.size:
+ raise StopIteration
+ if self.i < self.curr[1] or self.i >= self.curr[2]:
+ self.curr = self.region()
+ self.pair = self.curr[0][self.i - self.curr[1]].cast(
+ gdb.lookup_type(self.v.type.template_argument(0).name))
+ result = ('key', self.pair['first'])
+ return result
+
+
+class ImmutableMapPrinter:
+ """Pretty-printer for mongo::immutable::map<>."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return '%s of size %d' % (self.val.type, int(self.val['_storage']['impl_']['size']))
+
+ def children(self):
+ return ImmutableMapIter(self.val['_storage'])
+
+ def display_hint(self):
+ return 'map'
+
+
+class ImmutableSetPrinter:
+ """Pretty-printer for mongo::immutable::set<>."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return '%s of size %d' % (self.val.type, int(self.val['_storage']['impl_']['size']))
+
+ def children(self):
+ return ImmerListIter(self.val['_storage'])
+
+ def display_hint(self):
+ return 'array'
+
+
def find_match_brackets(search, opening='<', closing='>'):
"""Return the index of the closing bracket that matches the first opening bracket.
@@ -721,7 +835,7 @@ def make_inverse_enum_dict(enum_type_name):
For example, if the enum type is 'mongo::sbe::vm::Builtin' with an element 'regexMatch', the
dictionary will contain 'regexMatch' value and not 'mongo::sbe::vm::Builtin::regexMatch'.
"""
- enum_dict = gdb.types.make_enum_dict(gdb.lookup_type(enum_type_name))
+ enum_dict = gdb.types.make_enum_dict(lookup_type(enum_type_name))
enum_inverse_dic = dict()
for key, value in enum_dict.items():
enum_inverse_dic[int(value)] = key.split('::')[-1] # take last element
@@ -768,11 +882,11 @@ def __init__(self, val):
# either use an inline buffer or an allocated one. The choice of storage is decoded in the
# last bit of the 'metadata_' field.
storage = self.val['_instrs']['storage_']
- meta = storage['metadata_'].cast(gdb.lookup_type('size_t'))
+ meta = storage['metadata_'].cast(lookup_type('size_t'))
self.is_inlined = (meta % 2 == 0)
self.size = (meta >> 1)
self.pdata = \
- storage['data_']['inlined']['inlined_data'].cast(gdb.lookup_type('uint8_t').pointer()) \
+ storage['data_']['inlined']['inlined_data'].cast(lookup_type('uint8_t').pointer()) \
if self.is_inlined \
else storage['data_']['allocated']['allocated_data']
@@ -796,17 +910,17 @@ def children(self):
yield 'instrs total size', self.size
# Sizes for types we'll use when parsing the instructions stream.
- int_size = gdb.lookup_type('int').sizeof
- ptr_size = gdb.lookup_type('void').pointer().sizeof
- tag_size = gdb.lookup_type('mongo::sbe::value::TypeTags').sizeof
- value_size = gdb.lookup_type('mongo::sbe::value::Value').sizeof
- uint8_size = gdb.lookup_type('uint8_t').sizeof
- uint32_size = gdb.lookup_type('uint32_t').sizeof
- uint64_size = gdb.lookup_type('uint64_t').sizeof
- builtin_size = gdb.lookup_type('mongo::sbe::vm::Builtin').sizeof
- time_unit_size = gdb.lookup_type('mongo::TimeUnit').sizeof
- timezone_size = gdb.lookup_type('mongo::TimeZone').sizeof
- day_of_week_size = gdb.lookup_type('mongo::DayOfWeek').sizeof
+ int_size = lookup_type('int').sizeof
+ ptr_size = lookup_type('void').pointer().sizeof
+ tag_size = lookup_type('mongo::sbe::value::TypeTags').sizeof
+ value_size = lookup_type('mongo::sbe::value::Value').sizeof
+ uint8_size = lookup_type('uint8_t').sizeof
+ uint32_size = lookup_type('uint32_t').sizeof
+ uint64_size = lookup_type('uint64_t').sizeof
+ builtin_size = lookup_type('mongo::sbe::vm::Builtin').sizeof
+ time_unit_size = lookup_type('mongo::TimeUnit').sizeof
+ timezone_size = lookup_type('mongo::TimeZone').sizeof
+ day_of_week_size = lookup_type('mongo::DayOfWeek').sizeof
cur_op = self.pdata
end_op = self.pdata + self.size
@@ -851,9 +965,9 @@ def children(self):
cur_op += uint32_size
elif op_name in ['function', 'functionSmall']:
arity_size = \
- gdb.lookup_type('mongo::sbe::vm::ArityType').sizeof \
+ lookup_type('mongo::sbe::vm::ArityType').sizeof \
if op_name == 'function' \
- else gdb.lookup_type('mongo::sbe::vm::SmallArityType').sizeof
+ else lookup_type('mongo::sbe::vm::SmallArityType').sizeof
builtin_id = read_as_integer(cur_op, builtin_size)
args = 'builtin: ' + self.builtins_lookup.get(builtin_id, "unknown")
args += ' arity: ' + str(read_as_integer(cur_op + builtin_size, arity_size))
@@ -896,6 +1010,8 @@ def build_pretty_printer():
"""Build a pretty printer."""
pp = MongoPrettyPrinterCollection()
pp.add('BSONObj', 'mongo::BSONObj', False, BSONObjPrinter)
+ pp.add('DatabaseName', 'mongo::DatabaseName', False, DatabaseNamePrinter)
+ pp.add('NamespaceString', 'mongo::NamespaceString', False, NamespaceStringPrinter)
pp.add('Decorable', 'mongo::Decorable', True, DecorablePrinter)
pp.add('Status', 'mongo::Status', False, StatusPrinter)
pp.add('StatusWith', 'mongo::StatusWith', True, StatusWithPrinter)
@@ -914,6 +1030,8 @@ def build_pretty_printer():
pp.add('__wt_update', '__wt_update', False, WtUpdateToBsonPrinter)
pp.add('CodeFragment', 'mongo::sbe::vm::CodeFragment', False, SbeCodeFragmentPrinter)
pp.add('boost::optional', 'boost::optional', True, BoostOptionalPrinter)
+ pp.add('immutable::map', 'mongo::immutable::map', True, ImmutableMapPrinter)
+ pp.add('immutable::set', 'mongo::immutable::set', True, ImmutableSetPrinter)
# Optimizer/ABT related pretty printers that can be used only with a running process.
register_abt_printers(pp)
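A standalone mimic (plain Python, no GDB) of the string layout the new DatabaseNamePrinter/NamespaceStringPrinter decode: byte 0 is a flag byte, and when TENANT_ID_MASK is set the next OBJECT_ID_WIDTH bytes hold the tenant ObjectId, followed by the namespace text. The sample buffers below are invented for illustration.

TENANT_ID_MASK = 0x80
OBJECT_ID_WIDTH = 12


def decode_namespace(buf: bytes) -> str:
    # Mirrors the printers above: skip the flag byte, optionally peel off the tenant id.
    if buf[0] & TENANT_ID_MASK:
        tenant = buf[1:1 + OBJECT_ID_WIDTH].hex()
        return f"{tenant}_{buf[1 + OBJECT_ID_WIDTH:].decode()}"
    return buf[1:].decode()


assert decode_namespace(b"\x00test.coll") == "test.coll"
assert decode_namespace(b"\x80" + bytes(range(12)) + b"test.coll") == \
    "000102030405060708090a0b_test.coll"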
diff --git a/buildscripts/gdb/optimizer_printers.py b/buildscripts/gdb/optimizer_printers.py
index 8c6c399e6945b..926e190df1b61 100644
--- a/buildscripts/gdb/optimizer_printers.py
+++ b/buildscripts/gdb/optimizer_printers.py
@@ -8,7 +8,7 @@
if not gdb:
sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent))
- from buildscripts.gdb.mongo import get_boost_optional
+ from buildscripts.gdb.mongo import get_boost_optional, lookup_type
def eval_print_fn(val, print_fn):
@@ -51,6 +51,14 @@ def __init__(self, val):
super().__init__(val, "ExplainGenerator::explainInterval")
+class CandidateIndexEntryPrinter(OptimizerTypePrinter):
+ """Pretty-printer for mongo::optimizer::CandidateIndexEntry."""
+
+ def __init__(self, val):
+ """Initialize CandidateIndexEntryPrinter."""
+ super().__init__(val, "ExplainGenerator::explainCandidateIndex")
+
+
class IntervalExprPrinter(OptimizerTypePrinter):
"""Pretty-printer for mongo::optimizer::IntervalRequirement::Node."""
@@ -99,6 +107,7 @@ def __init__(self, val, arity, name):
self.val = val
self.arity = arity
self.name = name
+ self.custom_children = []
@staticmethod
def display_hint():
@@ -109,7 +118,16 @@ def children(self):
"""children."""
prior_indent = ABTPrinter.indent_level
- current_indent = ABTPrinter.indent_level + self.arity - 1
+ current_indent = ABTPrinter.indent_level + self.arity + len(self.custom_children) - 1
+ for child in self.custom_children:
+ lhs = "\n"
+ for _ in range(current_indent):
+ lhs += "| "
+
+ ABTPrinter.indent_level = current_indent
+ yield lhs, child
+ current_indent -= 1
+
for i in range(self.arity):
lhs = "\n"
for _ in range(current_indent):
@@ -121,6 +139,10 @@ def children(self):
current_indent -= 1
ABTPrinter.indent_level = prior_indent
+ # Adds a custom child node which is not directly contained in the "_nodes" member variable.
+ def add_child(self, child):
+ self.custom_children.append(child)
+
def to_string(self):
# Default for nodes which just print their type.
return self.name
@@ -262,9 +284,18 @@ def display_hint():
"""Display hint."""
return None
+ @staticmethod
+ def print_sbe_value(tag, value):
+ value_print_fn = "sbe::value::print"
+ (print_fn_symbol, _) = gdb.lookup_symbol(value_print_fn)
+ if print_fn_symbol is None:
+ raise gdb.GdbError("Could not find pretty print function: " + value_print_fn)
+ print_fn = print_fn_symbol.value()
+ return print_fn(tag, value)
+
def to_string(self):
- return "Constant[tag={},val={}]".format(
- str(self.val["_tag"]).split("::")[-1], self.val["_val"])
+ return "Constant[{}]".format(
+ ConstantPrinter.print_sbe_value(self.val["_tag"], self.val["_val"]))
class VariablePrinter(object):
@@ -550,9 +581,9 @@ def to_string(self):
if get_boost_optional(root_proj) is not None:
res += ": " + str(root_proj) + ", "
# Rely on default printer for std::set, but remove the extra metadata at the start.
- # TODO SERVER-75541 pretty print field projections map.
- # field_projections = self.val["_fieldProjections"]
- # res += str(field_projections).split("elems =")[-1]
+ field_projections = self.val["_fieldProjections"]
+ res += "" if field_projections["size_"] == 0 else str(field_projections).split(
+ "elems =")[-1]
res += "}"
return res
@@ -599,7 +630,7 @@ def __init__(self, val):
def to_string(self):
return "IndexScan[{{{}}}, scanDef={}, indexDef={}, interval={}]".format(
self.val["_fieldProjectionMap"], self.val["_scanDefName"], self.val["_indexDefName"],
- self.val["_indexInterval"])
+ self.val["_indexInterval"]).replace("\n", "")
class SeekNodePrinter(FixedArityNodePrinter):
@@ -610,7 +641,7 @@ def __init__(self, val):
super().__init__(val, 2, "Seek")
def to_string(self):
- return "Seek[rid_projection: {}, {}, scanDef: {}]".format(self.val["_rid_projectionName"],
+ return "Seek[rid_projection: {}, {}, scanDef: {}]".format(self.val["_ridProjectionName"],
self.val["_fieldProjectionMap"],
self.val["_scanDefName"])
@@ -638,12 +669,70 @@ def to_string(self):
self.val["_nodeId"]["_index"])
+class ResidualRequirementPrinter(object):
+ """Pretty-printer for ResidualRequirement."""
+
+ def __init__(self, val):
+ """Initialize ResidualRequirementPrinter."""
+ self.val = val
+
+ def to_string(self):
+ key = self.val["_key"]
+ req = self.val["_req"]
+ res = "<"
+ if get_boost_optional(key["_projectionName"]) is not None:
+ res += "refProj: " + str(get_boost_optional(key["_projectionName"])) + ", "
+
+ res += "path: '" + str(key["_path"]).replace("| ", "").replace("\n", " -> ") + "'"
+
+ if get_boost_optional(req["_boundProjectionName"]) is not None:
+ res += "boundProj: " + str(get_boost_optional(req["_boundProjectionName"])) + ", "
+
+ res += ">"
+ return res
+
+
class SargableNodePrinter(FixedArityNodePrinter):
"""Pretty-printer for SargableNode."""
def __init__(self, val):
"""Initialize SargableNodePrinter."""
- super().__init__(val, 3, "Sargable")
+ # Although Sargable technically has 3 children, avoid printing the refs (child1) and bind block (child2).
+ super().__init__(val, 1, "Sargable")
+
+ # Add children for requirements, candidateIndex, and scan_params.
+ self.add_child(str(self.val["_reqMap"]).replace("\n", ""))
+ self.add_child(self.print_candidate_indexes())
+
+ self.scan_params = get_boost_optional(self.val["_scanParams"])
+ if self.scan_params is not None:
+ self.add_child(self.print_scan_params())
+
+ def print_scan_params(self):
+ res = "scan_params: (proj: " + str(self.scan_params["_fieldProjectionMap"]) + ", "
+ residual_reqs = get_boost_optional(self.scan_params["_residualRequirements"])
+ if residual_reqs is not None:
+ res += "residual: " + str(residual_reqs)
+ res += ")"
+ return res
+
+ def print_candidate_indexes(self):
+ res = "candidateIndexes: ["
+ indexes = Vector(self.val["_candidateIndexes"])
+ for i in range(indexes.count()):
+ if i > 0:
+ res += ", "
+ res += ""
+ res += "]"
+ return res
+
+ @staticmethod
+ def index_req_to_string(index_req):
+ req_map = ["Index", "Seek", "Complete"]
+ return req_map[index_req]
+
+ def to_string(self):
+ return "Sargable [" + self.index_req_to_string(self.val["_target"]) + "]"
class RIDIntersectNodePrinter(FixedArityNodePrinter):
@@ -813,7 +902,7 @@ def get_dynamic_type(self):
def to_string(self):
dynamic_type = self.get_dynamic_type()
try:
- dynamic_type = gdb.lookup_type(dynamic_type).strip_typedefs()
+ dynamic_type = lookup_type(dynamic_type).strip_typedefs()
except gdb.error:
return "Unknown PolyValue tag: {}, did you add a new one?".format(self.tag)
# GDB automatically formats types with children, remove the extra characters to get the
@@ -899,7 +988,7 @@ class ABTPrinter(PolyValuePrinter):
def get_bound_projections(node):
# Casts the input node to an ExpressionBinder and returns the set of bound projection names.
pp = PolyValuePrinter(ABTPrinter.abt_type_set, ABTPrinter.abt_namespace, node)
- dynamic_type = gdb.lookup_type(pp.get_dynamic_type()).strip_typedefs()
+ dynamic_type = lookup_type(pp.get_dynamic_type()).strip_typedefs()
binder = pp.cast_control_block(dynamic_type)
return Vector(binder["_names"])
@@ -908,6 +997,69 @@ def __init__(self, val):
super().__init__(ABTPrinter.abt_type_set, ABTPrinter.abt_namespace, val)
+class AtomPrinter(object):
+ """Pretty-printer for Atom."""
+
+ def __init__(self, val):
+ """Initialize AtomPrinter."""
+ self.val = val
+
+ def to_string(self):
+ return self.val["_expr"]
+
+
+class ConjunctionPrinter(object):
+ """Pretty-printer for Conjunction."""
+
+ def __init__(self, val, separator=" ^ "):
+ """Initialize ConjunctionPrinter."""
+ self.val = val
+ self.dynamic_nodes = Vector(self.val["_dyNodes"])
+ self.dynamic_count = self.dynamic_nodes.count()
+ self.separator = separator
+
+ def to_string(self):
+ if self.dynamic_count == 0:
+ return ""
+
+ res = ""
+ first = True
+ for child in self.dynamic_nodes:
+ if first:
+ first = False
+ else:
+ res += self.separator
+
+ res += str(child)
+ return res
+
+
+class DisjunctionPrinter(ConjunctionPrinter):
+ """Pretty-printer for Disjunction."""
+
+ def __init__(self, val):
+ super().__init__(val, " U ")
+
+
+class BoolExprPrinter(PolyValuePrinter):
+ """Pretty-printer for BoolExpr."""
+
+ type_set = ["Atom", "Conjunction", "Disjunction"]
+
+ def __init__(self, val, template_type):
+ """Initialize BoolExprPrinter."""
+ namespace = "mongo::optimizer::BoolExpr<" + template_type + ">::"
+ super().__init__(BoolExprPrinter.type_set, namespace, val)
+
+
+class ResidualReqExprPrinter(BoolExprPrinter):
+ """Pretty-printer for BoolExpr."""
+
+ def __init__(self, val):
+ """Initialize ResidualReqExprPrinter."""
+ super().__init__(val, "mongo::optimizer::ResidualRequirement")
+
+
def register_abt_printers(pp):
"""Registers a number of pretty printers related to the CQF optimizer."""
@@ -934,6 +1086,28 @@ def register_abt_printers(pp):
pp.add("PartialSchemaRequirements", "mongo::optimizer::PartialSchemaRequirements", False,
PartialSchemaReqMapPrinter)
+ # ResidualRequirement printer.
+ pp.add("ResidualRequirement", "mongo::optimizer::ResidualRequirement", False,
+ ResidualRequirementPrinter)
+
+ # CandidateIndexEntry printer.
+ pp.add("CandidateIndexEntry", "mongo::optimizer::CandidateIndexEntry", False,
+ CandidateIndexEntryPrinter)
+
+ pp.add(
+ "ResidualRequirementExpr",
+ ("mongo::optimizer::algebra::PolyValue<" +
+ "mongo::optimizer::BoolExpr::Atom, " +
+ "mongo::optimizer::BoolExpr::Conjunction, " +
+ "mongo::optimizer::BoolExpr::Disjunction>"),
+ False,
+ ResidualReqExprPrinter,
+ )
+ for bool_type in BoolExprPrinter.type_set:
+ pp.add(bool_type,
+ "mongo::optimizer::BoolExpr::" + bool_type,
+ False, getattr(sys.modules[__name__], bool_type + "Printer"))
+
# Utility types within the optimizer.
pp.add("StrongStringAlias", "mongo::optimizer::StrongStringAlias", True,
StrongStringAliasPrinter)
@@ -946,7 +1120,7 @@ def register_abt_printers(pp):
# stale.
try:
# ABT printer.
- abt_type = gdb.lookup_type("mongo::optimizer::ABT").strip_typedefs()
+ abt_type = lookup_type("mongo::optimizer::ABT").strip_typedefs()
pp.add('ABT', abt_type.name, False, ABTPrinter)
abt_ref_type = abt_type.name + "::Reference"
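A minimal sketch of the pattern ConstantPrinter.print_sbe_value() above relies on: looking up a function in the debugged process and invoking it on gdb.Values. It only works inside GDB with a live inferior; the generic helper name below is hypothetical.

import gdb  # only usable inside a GDB session with a running inferior


def call_inferior_function(symbol_name, *args):
    """Look up a function in the debuggee and call it with gdb.Value arguments."""
    (symbol, _) = gdb.lookup_symbol(symbol_name)
    if symbol is None:
        raise gdb.GdbError("Could not find function: " + symbol_name)
    return symbol.value()(*args)


# e.g. call_inferior_function("sbe::value::print", constant["_tag"], constant["_val"])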
diff --git a/buildscripts/gdb/wt_dump_table.py b/buildscripts/gdb/wt_dump_table.py
index 699cb3381e4d2..1add011e3e968 100644
--- a/buildscripts/gdb/wt_dump_table.py
+++ b/buildscripts/gdb/wt_dump_table.py
@@ -1,6 +1,13 @@
import gdb
import bson
+import sys
+import os
from pprint import pprint
+from pathlib import Path
+
+if not gdb:
+ sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent))
+ from buildscripts.gdb.mongo import lookup_type
DEBUGGING = False
'''
@@ -21,7 +28,7 @@
def dump_pages_for_table(ident):
- conn_impl_type = gdb.lookup_type("WT_CONNECTION_IMPL")
+ conn_impl_type = lookup_type("WT_CONNECTION_IMPL")
if not conn_impl_type:
print('WT_CONNECTION_IMPL type not found. Try invoking this function from a different \
thread and frame.')
@@ -104,7 +111,7 @@ def get_data_handle(conn, handle_name):
def get_btree_handle(dhandle):
- btree = gdb.lookup_type('WT_BTREE').pointer()
+ btree = lookup_type('WT_BTREE').pointer()
return dhandle['handle'].reinterpret_cast(btree).dereference()
diff --git a/buildscripts/idl/gen_all_feature_flag_list.py b/buildscripts/idl/gen_all_feature_flag_list.py
index b76920bde07ce..1dcc46d11bc3b 100644
--- a/buildscripts/idl/gen_all_feature_flag_list.py
+++ b/buildscripts/idl/gen_all_feature_flag_list.py
@@ -56,16 +56,22 @@ def is_third_party_idl(idl_path: str) -> bool:
return False
-def gen_all_feature_flags(idl_dir: str = os.getcwd()):
+def gen_all_feature_flags(idl_dirs: List[str] = None):
"""Generate a list of all feature flags."""
+ default_idl_dirs = ["src", "buildscripts"]
+
+ if not idl_dirs:
+ idl_dirs = default_idl_dirs
+
all_flags = []
- for idl_path in sorted(lib.list_idls(idl_dir)):
- if is_third_party_idl(idl_path):
- continue
- doc = parser.parse_file(open(idl_path), idl_path)
- for feature_flag in doc.spec.feature_flags:
- if feature_flag.default.literal != "true":
- all_flags.append(feature_flag.name)
+ for idl_dir in idl_dirs:
+ for idl_path in sorted(lib.list_idls(idl_dir)):
+ if is_third_party_idl(idl_path):
+ continue
+ doc = parser.parse_file(open(idl_path), idl_path)
+ for feature_flag in doc.spec.feature_flags:
+ if feature_flag.default.literal != "true":
+ all_flags.append(feature_flag.name)
force_disabled_flags = yaml.safe_load(
open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml"))
diff --git a/buildscripts/idl/idl/ast.py b/buildscripts/idl/idl/ast.py
index 6a27bfad81803..2c338c45d87c2 100644
--- a/buildscripts/idl/idl/ast.py
+++ b/buildscripts/idl/idl/ast.py
@@ -109,6 +109,9 @@ def __init__(self, file_name, line, column):
self.first_element_field_name = None # type: str
self.deserialize_with_tenant = False # type: bool
self.internal_only = False # type: bool
+ # Marks whether this type is a query shape component.
+ # Can only be true if is_struct is true.
+ self.is_query_shape_component = False # type: bool
super(Type, self).__init__(file_name, line, column)
@@ -140,6 +143,11 @@ def __init__(self, file_name, line, column):
self.cpp_validator_func = None # type: str
self.is_command_reply = False # type: bool
self.generic_list_type = None # type: Optional[GenericListType]
+ # Determines whether or not this IDL struct can be a component of a query shape. See WRITING-13831.
+ self.query_shape_component = False # type: bool
+ # pylint: disable=invalid-name
+ self.unsafe_dangerous_disable_extra_field_duplicate_checks = None # type: bool
+
super(Struct, self).__init__(file_name, line, column)
@@ -202,6 +210,31 @@ def __init__(self, file_name, line, column):
super(Validator, self).__init__(file_name, line, column)
+@enum.unique
+class QueryShapeFieldType(enum.Enum):
+ # Abstract literal from shape.
+ LITERAL = enum.auto()
+ # Leave value as-is in shape.
+ PARAMETER = enum.auto()
+ # Anonymize string value.
+ ANONYMIZE = enum.auto()
+ # IDL type uses custom serializer -- defer to that serializer.
+ CUSTOM = enum.auto()
+
+ @classmethod
+ def bind(cls, string_value):
+ # type: (Optional[str]) -> Optional[QueryShapeFieldType]
+ if string_value is None:
+ return None
+ bindings = {
+ "literal": cls.LITERAL,
+ "parameter": cls.PARAMETER,
+ "anonymize": cls.ANONYMIZE,
+ "custom": cls.CUSTOM,
+ }
+ return bindings.get(string_value, None)
+
+
class Field(common.SourceLocation):
"""
An instance of a field in a struct.
@@ -226,9 +259,6 @@ def __init__(self, file_name, line, column):
self.type = None # type: Type
self.always_serialize = False # type: bool
- # Set if this field must be populated before entering the BSON iteration loop
- self.preparse = False # type: bool
-
# Properties specific to fields which are arrays.
self.supports_doc_sequence = False # type: bool
@@ -245,8 +275,24 @@ def __init__(self, file_name, line, column):
# Extra info for generic fields.
self.generic_field_info = None # type: Optional[GenericFieldInfo]
+ # Determines whether or not this field represents a literal value that should be abstracted when serializing a query shape.
+ # See WRITING-13831 for details on query shape.
+ self.query_shape = None # type: Optional[QueryShapeFieldType]
+
super(Field, self).__init__(file_name, line, column)
+ @property
+ def should_serialize_with_options(self):
+ # type: () -> bool
+ """Returns true if the IDL compiler should add a call to serialization options for this field."""
+ return self.query_shape is not None and self.query_shape in [
+ QueryShapeFieldType.LITERAL, QueryShapeFieldType.ANONYMIZE
+ ]
+
+ @property
+ def should_shapify(self):
+ return self.query_shape is not None and self.query_shape != QueryShapeFieldType.PARAMETER
+
class Privilege(common.SourceLocation):
"""IDL privilege information."""
diff --git a/buildscripts/idl/idl/binder.py b/buildscripts/idl/idl/binder.py
index b6f8446b41de8..0d75ee91a47e4 100644
--- a/buildscripts/idl/idl/binder.py
+++ b/buildscripts/idl/idl/binder.py
@@ -272,6 +272,13 @@ def _bind_struct_common(ctxt, parsed_spec, struct, ast_struct):
ast_struct.allow_global_collection_name = struct.allow_global_collection_name
ast_struct.non_const_getter = struct.non_const_getter
ast_struct.is_command_reply = struct.is_command_reply
+ ast_struct.query_shape_component = struct.query_shape_component
+ ast_struct.unsafe_dangerous_disable_extra_field_duplicate_checks = struct.unsafe_dangerous_disable_extra_field_duplicate_checks
+
+ # Check that unsafe_dangerous_disable_extra_field_duplicate_checks is used correctly
+ if ast_struct.unsafe_dangerous_disable_extra_field_duplicate_checks and ast_struct.strict is True:
+ ctxt.add_strict_and_disable_check_not_allowed(ast_struct)
+
if struct.is_generic_cmd_list:
if struct.is_generic_cmd_list == "arg":
ast_struct.generic_list_type = ast.GenericListType.ARG
@@ -324,6 +331,20 @@ def _bind_struct_common(ctxt, parsed_spec, struct, ast_struct):
if not _is_duplicate_field(ctxt, ast_struct.name, ast_struct.fields, ast_field):
ast_struct.fields.append(ast_field)
+ # Verify that each field on the struct defines a query shape type on the field if and only if
+ # query_shape_component is defined on the struct.
+ if not field.hidden and struct.query_shape_component and ast_field.query_shape is None:
+ ctxt.add_must_declare_shape_type(ast_field, ast_struct.name, ast_field.name)
+
+ if not struct.query_shape_component and ast_field.query_shape is not None:
+ ctxt.add_must_be_query_shape_component(ast_field, ast_struct.name, ast_field.name)
+
+ if ast_field.query_shape == ast.QueryShapeFieldType.ANONYMIZE and not (
+ ast_field.type.cpp_type in ["std::string", "std::vector"]
+ or 'string' in ast_field.type.bson_serialization_type):
+ ctxt.add_query_shape_anonymize_must_be_string(ast_field, ast_field.name,
+ ast_field.type.cpp_type)
+
# Fill out the field comparison_order property as needed
if ast_struct.generate_comparison_operators and ast_struct.fields:
# If the user did not specify an ordering of fields, then number all fields in
@@ -363,6 +384,7 @@ def _inject_hidden_fields(struct):
serialization_context_field.cpp_name = "serializationContext"
serialization_context_field.optional = False
serialization_context_field.default = "SerializationContext()"
+ serialization_context_field.hidden = True
struct.fields.append(serialization_context_field)
@@ -422,8 +444,6 @@ def _inject_hidden_command_fields(command):
expect_prefix_field.type.type_name = "bool"
expect_prefix_field.cpp_name = "expectPrefix"
expect_prefix_field.optional = True
- # we must extract expectPrefix before any other fields that may consume it
- expect_prefix_field.preparse = True
command.fields.append(expect_prefix_field)
@@ -438,6 +458,7 @@ def _bind_struct_type(struct):
ast_type.cpp_type = _get_struct_qualified_cpp_name(struct)
ast_type.bson_serialization_type = ["object"]
ast_type.first_element_field_name = struct.fields[0].name if struct.fields else None
+ ast_type.is_query_shape_component = struct.query_shape_component
return ast_type
@@ -453,6 +474,10 @@ def _bind_struct_field(ctxt, ast_field, idl_type):
assert isinstance(array.element_type, syntax.Struct)
struct = cast(syntax.Struct, array.element_type)
+ # Check that unsafe_dangerous_disable_extra_field_duplicate_checks is used correctly
+ if struct.unsafe_dangerous_disable_extra_field_duplicate_checks:
+ ctxt.add_inheritance_and_disable_check_not_allowed(ast_field)
+
ast_field.type = _bind_struct_type(struct)
ast_field.type.is_array = isinstance(idl_type, syntax.ArrayType)
@@ -1002,6 +1027,7 @@ def _bind_type(idltype):
ast_type.deserializer = _normalize_method_name(idltype.cpp_type, idltype.deserializer)
ast_type.deserialize_with_tenant = idltype.deserialize_with_tenant
ast_type.internal_only = idltype.internal_only
+ ast_type.is_query_shape_component = True
return ast_type
@@ -1026,7 +1052,11 @@ def _bind_field(ctxt, parsed_spec, field):
# to provide compatibility support.
ast_field.stability = field.stability
ast_field.always_serialize = field.always_serialize
- ast_field.preparse = field.preparse
+
+ if field.query_shape is not None:
+ ast_field.query_shape = ast.QueryShapeFieldType.bind(field.query_shape)
+ if ast_field.query_shape is None:
+ ctxt.add_invalid_query_shape_value(ast_field, field.query_shape)
ast_field.cpp_name = field.name
if field.cpp_name:
@@ -1108,6 +1138,8 @@ def _bind_field(ctxt, parsed_spec, field):
if ast_field.validator is None:
return None
+ if ast_field.should_shapify and not ast_field.type.is_query_shape_component:
+ ctxt.add_must_be_query_shape_component(ast_field, ast_field.type.name, ast_field.name)
return ast_field
@@ -1195,8 +1227,8 @@ def _bind_chained_struct(ctxt, parsed_spec, ast_struct, chained_struct):
ast_struct.fields.append(ast_field)
-def _bind_globals(parsed_spec):
- # type: (syntax.IDLSpec) -> ast.Global
+def _bind_globals(ctxt, parsed_spec):
+ # type: (errors.ParserContext, syntax.IDLSpec) -> ast.Global
"""Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy."""
if parsed_spec.globals:
ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line,
@@ -1204,6 +1236,9 @@ def _bind_globals(parsed_spec):
ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace
ast_global.cpp_includes = parsed_spec.globals.cpp_includes
+ if not ast_global.cpp_namespace.startswith("mongo"):
+ ctxt.add_bad_cpp_namespace(ast_global, ast_global.cpp_namespace)
+
configs = parsed_spec.globals.configs
if configs:
ast_global.configs = ast.ConfigGlobal(configs.file_name, configs.line, configs.column)
@@ -1242,15 +1277,6 @@ def _validate_enum_int(ctxt, idl_enum):
str(value_error))
return
- # Check the values are continuous so they can be static_cast.
- min_value = min(int_values_set)
- max_value = max(int_values_set)
-
- valid_int = set(range(min_value, max_value + 1))
-
- if valid_int != int_values_set:
- ctxt.add_enum_non_continuous_range_error(idl_enum, idl_enum.name)
-
def _bind_enum(ctxt, idl_enum):
# type: (errors.ParserContext, syntax.Enum) -> ast.Enum
@@ -1436,13 +1462,20 @@ def _bind_feature_flags(ctxt, param):
ctxt.add_feature_flag_default_false_has_version(param)
return None
- # Feature flags that default to true are required to have a version
- if param.default.literal == "true" and not param.version:
+ # Feature flags that default to true and should be FCV gated are required to have a version
+ if param.default.literal == "true" and param.shouldBeFCVGated.literal == "true" and not param.version:
ctxt.add_feature_flag_default_true_missing_version(param)
return None
+ # Feature flags that should not be FCV gated must not have a version
+ if param.shouldBeFCVGated.literal == "false" and param.version:
+ ctxt.add_feature_flag_fcv_gated_false_has_version(param)
+ return None
+
expr = syntax.Expression(param.default.file_name, param.default.line, param.default.column)
- expr.expr = '%s, "%s"_sd' % (param.default.literal, param.version if param.version else '')
+ expr.expr = '%s, "%s"_sd, %s' % (param.default.literal, param.version if
+ (param.shouldBeFCVGated.literal == "true"
+ and param.version) else '', param.shouldBeFCVGated.literal)
ast_param.default = _bind_expression(expr)
ast_param.default.export = False
@@ -1597,7 +1630,7 @@ def bind(parsed_spec):
bound_spec = ast.IDLAST()
- bound_spec.globals = _bind_globals(parsed_spec)
+ bound_spec.globals = _bind_globals(ctxt, parsed_spec)
_validate_types(ctxt, parsed_spec)
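A standalone mimic (not the binder itself) of the three-part default expression _bind_feature_flags() now emits: the default literal, the version string literal (only when the flag defaults to true and is FCV gated), and the shouldBeFCVGated literal. The version "7.0" below is just an example value.

def feature_flag_default_expr(default_literal, version, should_be_fcv_gated_literal):
    """Mirror the expression string built in _bind_feature_flags() above."""
    ver = version if (should_be_fcv_gated_literal == "true" and version) else ''
    return '%s, "%s"_sd, %s' % (default_literal, ver, should_be_fcv_gated_literal)


assert feature_flag_default_expr("true", "7.0", "true") == 'true, "7.0"_sd, true'
assert feature_flag_default_expr("false", None, "true") == 'false, ""_sd, true'
assert feature_flag_default_expr("true", None, "false") == 'true, ""_sd, false'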
diff --git a/buildscripts/idl/idl/cpp_types.py b/buildscripts/idl/idl/cpp_types.py
index a2631bb4f72fb..42696d0714e27 100644
--- a/buildscripts/idl/idl/cpp_types.py
+++ b/buildscripts/idl/idl/cpp_types.py
@@ -511,14 +511,14 @@ def has_serializer(self):
pass
@abstractmethod
- def gen_serializer_expression(self, indented_writer, expression):
- # type: (writer.IndentedTextWriter, str) -> str
+ def gen_serializer_expression(self, indented_writer, expression, should_shapify=False):
+ # type: (writer.IndentedTextWriter, str, bool) -> str
"""Generate code with the text writer and return an expression to serialize the type."""
pass
-def _call_method_or_global_function(expression, ast_type):
- # type: (str, ast.Type) -> str
+def _call_method_or_global_function(expression, ast_type, should_shapify=False):
+ # type: (str, ast.Type, bool) -> str
"""
Given a fully-qualified method name, call it correctly.
@@ -528,18 +528,32 @@ def _call_method_or_global_function(expression, ast_type):
"""
method_name = ast_type.serializer
serialization_context = 'getSerializationContext()' if ast_type.deserialize_with_tenant else ''
+ shape_options = ''
+ if should_shapify:
+ shape_options = 'options'
short_method_name = writer.get_method_name(method_name)
if writer.is_function(method_name):
if ast_type.deserialize_with_tenant:
serialization_context = ', ' + serialization_context
- return common.template_args('${method_name}(${expression}${serialization_context})',
- expression=expression, method_name=method_name,
- serialization_context=serialization_context)
+ if should_shapify:
+ shape_options = ', ' + shape_options
+
+ return common.template_args(
+ '${method_name}(${expression}${shape_options}${serialization_context})',
+ expression=expression,
+ method_name=method_name,
+ shape_options=shape_options,
+ serialization_context=serialization_context,
+ )
- return common.template_args('${expression}.${method_name}(${serialization_context})',
- expression=expression, method_name=short_method_name,
- serialization_context=serialization_context)
+ return common.template_args(
+ '${expression}.${method_name}(${shape_options}${serialization_context})',
+ expression=expression,
+ method_name=short_method_name,
+ shape_options=shape_options,
+ serialization_context=serialization_context,
+ )
class _CommonBsonCppTypeBase(BsonCppTypeBase):
@@ -560,9 +574,9 @@ def has_serializer(self):
# type: () -> bool
return self._ast_type.serializer is not None
- def gen_serializer_expression(self, indented_writer, expression):
- # type: (writer.IndentedTextWriter, str) -> str
- return _call_method_or_global_function(expression, self._ast_type)
+ def gen_serializer_expression(self, indented_writer, expression, should_shapify=False):
+ # type: (writer.IndentedTextWriter, str, bool) -> str
+ return _call_method_or_global_function(expression, self._ast_type, should_shapify)
class _ObjectBsonCppTypeBase(BsonCppTypeBase):
@@ -584,8 +598,8 @@ def has_serializer(self):
# type: () -> bool
return self._ast_type.serializer is not None
- def gen_serializer_expression(self, indented_writer, expression):
- # type: (writer.IndentedTextWriter, str) -> str
+ def gen_serializer_expression(self, indented_writer, expression, should_shapify=False):
+ # type: (writer.IndentedTextWriter, str, bool) -> str
method_name = writer.get_method_name(self._ast_type.serializer)
if self._ast_type.deserialize_with_tenant: # SerializationContext is tied to tenant deserialization
indented_writer.write_line(
@@ -618,8 +632,8 @@ def has_serializer(self):
# type: () -> bool
return self._ast_type.serializer is not None
- def gen_serializer_expression(self, indented_writer, expression):
- # type: (writer.IndentedTextWriter, str) -> str
+ def gen_serializer_expression(self, indented_writer, expression, should_shapify=False):
+ # type: (writer.IndentedTextWriter, str, bool) -> str
method_name = writer.get_method_name(self._ast_type.serializer)
indented_writer.write_line(
common.template_args('BSONArray localArray(${expression}.${method_name}());',
@@ -642,8 +656,8 @@ def has_serializer(self):
# type: () -> bool
return True
- def gen_serializer_expression(self, indented_writer, expression):
- # type: (writer.IndentedTextWriter, str) -> str
+ def gen_serializer_expression(self, indented_writer, expression, should_shapify=False):
+ # type: (writer.IndentedTextWriter, str, bool) -> str
if self._ast_type.serializer:
method_name = writer.get_method_name(self._ast_type.serializer)
indented_writer.write_line(
diff --git a/buildscripts/idl/idl/errors.py b/buildscripts/idl/idl/errors.py
index 3098fdee09b0e..2cda901647f42 100644
--- a/buildscripts/idl/idl/errors.py
+++ b/buildscripts/idl/idl/errors.py
@@ -81,7 +81,6 @@
ERROR_ID_ENUM_BAD_TYPE = "ID0036"
ERROR_ID_ENUM_BAD_INT_VAUE = "ID0037"
ERROR_ID_ENUM_NON_UNIQUE_VALUES = "ID0038"
-ERROR_ID_ENUM_NON_CONTINUOUS_RANGE = "ID0039"
ERROR_ID_BAD_COMMAND_NAMESPACE = "ID0041"
ERROR_ID_FIELD_NO_COMMAND = "ID0042"
ERROR_ID_NO_ARRAY_OF_CHAIN = "ID0043"
@@ -130,6 +129,16 @@
ERROR_ID_STABILITY_UNKNOWN_VALUE = "ID0091"
ERROR_ID_DUPLICATE_UNSTABLE_STABILITY = "ID0092"
ERROR_ID_INVALID_ARRAY_VARIANT = "ID0093"
+ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL = "ID0094"
+ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL = "ID0095"
+ERROR_ID_INVALID_TYPE_FOR_SHAPIFY = "ID0096"
+ERROR_ID_QUERY_SHAPE_PROPERTIES_MUTUALLY_EXCLUSIVE = "ID0097"
+ERROR_ID_QUERY_SHAPE_PROPERTY_CANNOT_BE_FALSE = "ID0098"
+ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED = "ID0099"
+ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED = "ID0100"
+ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION = "ID0101"
+ERROR_ID_QUERY_SHAPE_INVALID_VALUE = "ID0102"
+ERROR_ID_BAD_CPP_NAMESPACE = "ID0103"
class IDLError(Exception):
@@ -366,6 +375,14 @@ def get_bool(self, node):
return True
return False
+ def get_required_bool(self, node):
+ # type: (Union[yaml.nodes.MappingNode, yaml.nodes.ScalarNode, yaml.nodes.SequenceNode]) -> bool
+ boolean_value = yaml.safe_load(node.value)
+ if not isinstance(boolean_value, bool):
+ self._add_node_error(node, ERROR_ID_IS_NODE_VALID_BOOL,
+ "Illegal bool value, expected either 'true' or 'false'.")
+ return boolean_value
+
def get_list(self, node):
# type: (Union[yaml.nodes.MappingNode, yaml.nodes.ScalarNode, yaml.nodes.SequenceNode]) -> List[str]
"""Get a YAML scalar or sequence node as a list of strings."""
@@ -595,13 +612,6 @@ def add_enum_value_not_unique_error(self, location, enum_name):
self._add_error(location, ERROR_ID_ENUM_NON_UNIQUE_VALUES,
"Enum '%s' has duplicate values, all values must be unique" % (enum_name))
- def add_enum_non_continuous_range_error(self, location, enum_name):
- # type: (common.SourceLocation, str) -> None
- """Add an error for an enum having duplicate values."""
- self._add_error(location, ERROR_ID_ENUM_NON_CONTINUOUS_RANGE,
- ("Enum '%s' has non-continuous integer variables, enums must have a " +
- "continuous range of integer variables.") % (enum_name))
-
def add_bad_command_namespace_error(self, location, command_name, command_namespace,
valid_commands):
# type: (common.SourceLocation, str, str, List[str]) -> None
@@ -828,9 +838,10 @@ def add_missing_short_name_with_single_name(self, location, name):
def add_feature_flag_default_true_missing_version(self, location):
# type: (common.SourceLocation) -> None
- """Add an error about a default flag with a default value of true but no version."""
- self._add_error(location, ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION,
- ("Missing 'version' required for feature flag that defaults to true"))
+ """Add an error about a default flag with a default value of true and should be FCV gated but no version."""
+ self._add_error(location, ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION, (
+ "Missing 'version' required for feature flag that defaults to true and should be FCV gated"
+ ))
def add_feature_flag_default_false_has_version(self, location):
# type: (common.SourceLocation) -> None
@@ -839,6 +850,13 @@ def add_feature_flag_default_false_has_version(self, location):
location, ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION,
("The 'version' attribute is not allowed for feature flag that defaults to false"))
+ def add_feature_flag_fcv_gated_false_has_version(self, location):
+ # type: (common.SourceLocation) -> None
+ """Add an error about a feature flag that should not be FCV gated but has a version."""
+ self._add_error(
+ location, ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION,
+ ("The 'version' attribute is not allowed for feature flag that should be FCV gated"))
+
def add_reply_type_invalid_type(self, location, command_name, reply_type_name):
# type: (common.SourceLocation, str, str) -> None
"""Add an error about a command whose reply_type refers to an unknown type."""
@@ -935,6 +953,50 @@ def add_duplicate_unstable_stability(self, location):
"Field specifies both 'unstable' and 'stability' options, should use 'stability: [stable|unstable|internal]' instead and remove the deprecated 'unstable' option."
))
+ def add_must_declare_shape_type(self, location, struct_name, field_name):
+ # type: (common.SourceLocation, str, str) -> None
+ """Add an error about a field not specifying either query_shape_literal or query_shape_anonymize if the struct is query_shape_component."""
+ self._add_error(
+ location, ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL,
+ f"Field '{field_name}' must specify either 'query_shape_literal' or 'query_shape_anonymize' since struct '{struct_name}' is a query shape component."
+ )
+
+ def add_must_be_query_shape_component(self, location, struct_name, field_name):
+ # type: (common.SourceLocation, str, str) -> None
+ self._add_error(
+ location, ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL,
+ f"Field '{field_name}' cannot specify 'query_shape_literal' property since struct '{struct_name}' is not a query shape component."
+ )
+
+ def add_query_shape_anonymize_must_be_string(self, location, field_name, field_type):
+ self._add_error(
+ location, ERROR_ID_INVALID_TYPE_FOR_SHAPIFY,
+ f"In order for {field_name} to be marked as a query shape fieldpath, it must have a string type, not {field_type}."
+ )
+
+ def add_invalid_query_shape_value(self, location, query_shape_value):
+ self._add_error(location, ERROR_ID_QUERY_SHAPE_INVALID_VALUE,
+ f"'{query_shape_value}' is not a valid value for 'query_shape'.")
+
+ def add_strict_and_disable_check_not_allowed(self, location):
+ self._add_error(
+ location, ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED,
+ "Cannot set strict = true and unsafe_dangerous_disable_extra_field_duplicate_checks = true on a struct. unsafe_dangerous_disable_extra_field_duplicate_checks is only permitted on strict = false"
+ )
+
+ def add_inheritance_and_disable_check_not_allowed(self, location):
+ self._add_error(
+ location, ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED,
+ "Fields cannot have unsafe_dangerous_disable_extra_field_duplicate_checks = true. unsafe_dangerous_disable_extra_field_duplicate_checks on non field structs"
+ )
+
+ def add_bad_cpp_namespace(self, location, namespace):
+ # type: (common.SourceLocation, str) -> None
+ self._add_error(
+ location, ERROR_ID_BAD_CPP_NAMESPACE,
+ "cpp_namespace must start with 'mongo::' or be just 'mongo', namespace '%s' is not supported"
+ % (namespace))
+
def _assert_unique_error_messages():
# type: () -> None
diff --git a/buildscripts/idl/idl/generator.py b/buildscripts/idl/idl/generator.py
index 8f16cbf46c973..5afe093c8a2e4 100644
--- a/buildscripts/idl/idl/generator.py
+++ b/buildscripts/idl/idl/generator.py
@@ -36,7 +36,7 @@
import textwrap
from abc import ABCMeta, abstractmethod
from enum import Enum
-from typing import Dict, List, Mapping, Tuple, Union, cast
+from typing import Callable, Dict, List, Mapping, Optional, Tuple, Union, cast
from . import (ast, bson, common, cpp_types, enum_types, generic_field_list_types, struct_types,
writer)
@@ -175,6 +175,12 @@ def _gen_field_usage_constant(field):
return "k%sBit" % (common.title_case(field.cpp_name))
+def _gen_field_element_name(field):
+ # type: (ast.Field) -> str
+ """Get the name for a BSONElement pointer in field iteration."""
+ return "BSONElement_%s" % (common.title_case(field.cpp_name))
+
+
def _get_constant(name):
# type: (str) -> str
"""Transform an arbitrary label to a constant name."""
@@ -273,7 +279,7 @@ def _get_field_usage_checker(indented_writer, struct):
# Only use the fast field usage checker if we never expect extra fields that we need to ignore
# but still wish to do duplicate detection on.
- if struct.strict:
+ if struct.strict or struct.unsafe_dangerous_disable_extra_field_duplicate_checks:
return _FastFieldUsageChecker(indented_writer, struct.fields)
return _SlowFieldUsageChecker(indented_writer, struct.fields)
@@ -767,14 +773,14 @@ def gen_op_msg_request_member(self, command):
self._writer.write_empty_line()
- def gen_field_list_entries_declaration_struct(self, struct): # type: (ast.Struct) -> None
+ def gen_field_list_entries_declaration_struct(self, struct):
+ # type: (ast.Struct) -> None
"""Generate the field list entries map for a generic argument or reply field list."""
field_list_info = generic_field_list_types.get_field_list_info(struct)
self._writer.write_line(
common.template_args('// Map: fieldName -> ${should_forward_name}',
should_forward_name=field_list_info.get_should_forward_name()))
- self._writer.write_line(
- "static const stdx::unordered_map<std::string, bool> _genericFields;")
+ self._writer.write_line("static const StaticImmortal<StringMap<bool>> _genericFields;")
self.write_empty_line()
def gen_known_fields_declaration(self):
@@ -1079,6 +1085,10 @@ def generate(self, spec):
if any(command.api_version for command in spec.commands):
header_list.append('mongo/db/commands.h')
+ # Include serialization options only if there is a struct which is part of a query shape.
+ if any(struct.query_shape_component for struct in spec.structs):
+ header_list.append('mongo/db/query/serialization_options.h')
+
header_list.sort()
for include in header_list:
@@ -1490,8 +1500,9 @@ def _gen_usage_check(self, field, bson_element, field_usage_check):
self._writer.write_line('%s = true;' % (_get_has_field_member_name(field)))
def gen_field_deserializer(self, field, field_type, bson_object, bson_element,
- field_usage_check, tenant, is_command_field=False, check_type=True):
- # type: (ast.Field, ast.Type, str, str, _FieldUsageCheckerBase, str, bool, bool) -> None
+ field_usage_check, tenant, is_command_field=False, check_type=True,
+ deserialize_fn=None):
+ # type: (ast.Field, ast.Type, str, str, _FieldUsageCheckerBase, str, bool, bool, Optional[Callable[[], None]]) -> None
"""Generate the C++ deserializer piece for a field.
If field_type is scalar and check_type is True (the default), generate type-checking code.
@@ -1506,12 +1517,18 @@ def gen_field_deserializer(self, field, field_type, bson_object, bson_element,
predicate = "MONGO_likely(ctxt.checkAndAssertType(%s, Array))" % (bson_element)
with self._predicate(predicate):
self._gen_usage_check(field, bson_element, field_usage_check)
- self._gen_array_deserializer(field, bson_element, field_type, tenant)
+ if deserialize_fn:
+ deserialize_fn()
+ else:
+ self._gen_array_deserializer(field, bson_element, field_type, tenant)
return
elif field_type.is_variant:
self._gen_usage_check(field, bson_element, field_usage_check)
- self._gen_variant_deserializer(field, bson_element, tenant)
+ if deserialize_fn:
+ deserialize_fn()
+ else:
+ self._gen_variant_deserializer(field, bson_element, tenant)
return
def validate_and_assign_or_uassert(field, expression):
@@ -1528,6 +1545,7 @@ def validate_and_assign_or_uassert(field, expression):
self._writer.write_line('%s = std::move(value);' % (field_name))
if field.chained:
+ assert not deserialize_fn
# Do not generate a predicate check since we always call these deserializers.
if field_type.is_struct:
@@ -1552,6 +1570,9 @@ def validate_and_assign_or_uassert(field, expression):
with self._predicate(predicate):
self._gen_usage_check(field, bson_element, field_usage_check)
+ if deserialize_fn:
+ deserialize_fn()
+ return
object_value = self._gen_field_deserializer_expression(
bson_element, field, field_type, tenant)
@@ -1575,6 +1596,10 @@ def validate_and_assign_or_uassert(field, expression):
else:
validate_and_assign_or_uassert(field, object_value)
+ # if we explicitly set _dollarTenant, we know we have a non-prefixed tenantId
+ if field.name == '$tenant':
+ self._writer.write_line('_serializationContext.setTenantIdSource(true);')
+
if is_command_field and predicate:
with self._block('else {', '}'):
self._writer.write_line(
@@ -1672,7 +1697,7 @@ def _gen_initializer_vars(self, constructor, is_command):
# initialized first; don't move in the event a boost::none is supplied
initializer_vars.insert(0, '_%s(%s)' % (arg.name, initializer_var))
elif arg.name in ["nss", "nssOrUUID"]:
- # TODO(SERVER-75669): Remove this denylist, prevent use-after-move by defining fields in the correct order.
+ # TODO (SERVER-74238): Remove this denylist, prevent use-after-move by defining fields in the correct order.
initializer_vars.append('_%s(%s)' % (arg.name, arg.name))
else:
initializer_vars.append('_%s(std::move(%s))' % (arg.name, arg.name))
@@ -1713,9 +1738,7 @@ def _gen_constructor(self, struct, constructor, default_init):
initializes_db_name = True
elif [arg for arg in constructor.args if arg.name == 'nssOrUUID']:
if [field for field in struct.fields if field.serialize_op_msg_request_only]:
- initializers.append(
- '_dbName(nssOrUUID.uuid() ? nssOrUUID.dbName().value() : nssOrUUID.nss()->dbName())'
- )
+ initializers.append('_dbName(nssOrUUID.dbName())')
initializes_db_name = True
# Serialize has fields third
@@ -1749,7 +1772,7 @@ def gen_constructors(self, struct):
required_constructor = struct_type_info.get_required_constructor_method()
if len(required_constructor.args) != len(constructor.args):
- #print(struct.name + ": "+ str(required_constructor.args))
+ # print(struct.name + ": "+ str(required_constructor.args))
self._gen_constructor(struct, required_constructor, False)
def gen_field_list_entry_lookup_methods_struct(self, struct):
@@ -1759,14 +1782,14 @@ def gen_field_list_entry_lookup_methods_struct(self, struct):
defn = field_list_info.get_has_field_method().get_definition()
with self._block('%s {' % (defn, ), '}'):
self._writer.write_line(
- 'return _genericFields.find(fieldName.toString()) != _genericFields.end();')
+ 'return _genericFields->find(fieldName) != _genericFields->end();')
self._writer.write_empty_line()
defn = field_list_info.get_should_forward_method().get_definition()
with self._block('%s {' % (defn, ), '}'):
- self._writer.write_line('auto it = _genericFields.find(fieldName.toString());')
- self._writer.write_line('return (it == _genericFields.end() || it->second);')
+ self._writer.write_line('auto it = _genericFields->find(fieldName);')
+ self._writer.write_line('return (it == _genericFields->end() || it->second);')
self._writer.write_empty_line()
@@ -1789,25 +1812,32 @@ def _gen_fields_deserializer_common(self, struct, bson_object, tenant):
# type: (ast.Struct, str, str) -> _FieldUsageCheckerBase
"""Generate the C++ code to deserialize list of fields."""
- struct_fields = struct.fields.copy()
- preparse_fields = [] # type: List[ast.Field]
-
field_usage_check = _get_field_usage_checker(self._writer, struct)
if isinstance(struct, ast.Command):
self._writer.write_line('BSONElement commandElement;')
self._writer.write_line('bool firstFieldFound = false;')
self._writer.write_empty_line()
- # inject a context into the IDLParserContext that tags the class as a command request
- self._writer.write_line(
- 'setSerializationContext(SerializationContext::stateCommandRequest());')
-
- # some fields are consumed in the BSON iteration loop and need to be parsed before
- # entering the main loop
- for field in struct.fields: # iterate over the original list
- if field.preparse:
- struct_fields.remove(field)
- preparse_fields.append(field)
+ # Update the serialization context whether or not we received a tenantId object
+ if tenant == 'request.getValidatedTenantId()':
+ # inject a context into the IDLParserContext that tags the class as a command request
+ self._writer.write_line(
+ 'setSerializationContext(SerializationContext::stateCommandRequest());')
+ self._writer.write_line(
+ '_serializationContext.setTenantIdSource(request.getValidatedTenantId() != boost::none);'
+ )
+ else:
+ # if a non-default serialization context was passed in via the IDLParserContext,
+ # use that to set the local serialization context, otherwise set it to a command
+ # request
+ with self._block(
+ 'if (ctxt.getSerializationContext() != SerializationContext::stateDefault()) {',
+ '}'):
+ self._writer.write_line(
+ 'setSerializationContext(ctxt.getSerializationContext());')
+ with self._block('else {', '}'):
+ self._writer.write_line(
+ 'setSerializationContext(SerializationContext::stateCommandRequest());')
else:
# set the local serializer flags according to the constexpr set by is_command_reply
@@ -1818,86 +1848,103 @@ def _gen_fields_deserializer_common(self, struct, bson_object, tenant):
self._writer.write_empty_line()
- # we need to build two loops: one for the preparsed fields, and one for fields that don't
- # depend on other fields
- fields = [] # type: List[List[ast.Field]]
- if preparse_fields:
- fields.append(preparse_fields)
- fields.append(struct_fields)
- for_blocks = len(fields)
- last_block = for_blocks - 1
-
- for block_num in range(for_blocks):
- with self._block('for (const auto& element :%s) {' % (bson_object), '}'):
-
- self._writer.write_line('const auto fieldName = element.fieldNameStringData();')
+ deferred_fields = [] # type: List[ast.Field]
+ deferred_field_names = [] # type: List[str]
+ if 'expectPrefix' in [field.name for field in struct.fields]:
+ # Deserialization of 'expectPrefix' modifies the deserializationContext and how
+ # certain other fields are then deserialized.
+ # Such dependent fields include those which "deserialize_with_tenant" and
+ # any complex struct type.
+ # In practice, this typically only occurs on Command structs.
+ deferred_fields = [
+ field for field in struct.fields
+ if field.type and (field.type.is_struct or field.type.deserialize_with_tenant)
+ ]
+ deferred_field_names = [field.name for field in deferred_fields]
+ if deferred_fields:
+ self._writer.write_line(
+ '// Anchors for values of fields which may depend on others.')
+ for field in deferred_fields:
+ self._writer.write_line('BSONElement %s;' % (_gen_field_element_name(field)))
self._writer.write_empty_line()
- if isinstance(struct, ast.Command) and block_num == last_block:
- with self._predicate("firstFieldFound == false"):
- # Get the Command element if we need it for later in the deserializer to get the
- # namespace
- if struct.namespace != common.COMMAND_NAMESPACE_IGNORED:
- self._writer.write_line('commandElement = element;')
+ with self._block('for (const auto& element :%s) {' % (bson_object), '}'):
- self._writer.write_line('firstFieldFound = true;')
- self._writer.write_line('continue;')
+ self._writer.write_line('const auto fieldName = element.fieldNameStringData();')
+ self._writer.write_empty_line()
- self._writer.write_empty_line()
+ if isinstance(struct, ast.Command):
+ with self._predicate("firstFieldFound == false"):
+ # Get the Command element if we need it for later in the deserializer to get the
+ # namespace
+ if struct.namespace != common.COMMAND_NAMESPACE_IGNORED:
+ self._writer.write_line('commandElement = element;')
- first_field = True
- for field in fields[block_num]:
- # Do not parse chained fields as fields since they are actually chained types.
- if field.chained and not field.chained_struct_field:
- continue
- # Internal only fields are not parsed from BSON objects
- if field.type and field.type.internal_only:
- continue
+ self._writer.write_line('firstFieldFound = true;')
+ self._writer.write_line('continue;')
- field_predicate = 'fieldName == %s' % (_get_field_constant_name(field))
+ self._writer.write_empty_line()
- with self._predicate(field_predicate, not first_field):
+ first_field = True
+ for field in struct.fields:
+ # Do not parse chained fields as fields since they are actually chained types.
+ if field.chained and not field.chained_struct_field:
+ continue
+ # Internal only fields are not parsed from BSON objects
+ if field.type and field.type.internal_only:
+ continue
- if field.ignore:
- field_usage_check.add(field, "element")
+ field_predicate = 'fieldName == %s' % (_get_field_constant_name(field))
- self._writer.write_line('// ignore field')
- else:
- self.gen_field_deserializer(field, field.type, bson_object, "element",
- field_usage_check, tenant)
+ with self._predicate(field_predicate, not first_field):
- if first_field:
- first_field = False
+ def defer_field():
+ # type: () -> None
+ """Field depends on other field(s), store its location and defer processing till later."""
+ assert field.name in deferred_field_names
+ self._writer.write_line('%s = element;' % (_gen_field_element_name(field)))
- # only check for extraneous fields in the final block
- if block_num == last_block:
- # End of for fields
- # Generate strict check for extranous fields
- if struct.strict:
- # For commands, check if this is a well known command field that the IDL parser
- # should ignore regardless of strict mode.
- command_predicate = None
- if isinstance(struct, ast.Command):
- command_predicate = "!mongo::isGenericArgument(fieldName)"
-
- # Ditto for command replies
- if struct.is_command_reply:
- command_predicate = "!mongo::isGenericReply(fieldName)"
-
- with self._block('else {', '}'):
- with self._predicate(command_predicate):
- self._writer.write_line('ctxt.throwUnknownField(fieldName);')
+ if field.ignore:
+ field_usage_check.add(field, "element")
+ self._writer.write_line('// ignore field')
else:
- with self._else(not first_field):
- self._writer.write_line(
- 'auto push_result = usedFieldSet.insert(fieldName);')
- with writer.IndentedScopedBlock(
- self._writer,
- 'if (MONGO_unlikely(push_result.second == false)) {', '}'):
- self._writer.write_line('ctxt.throwDuplicateField(fieldName);')
+ fn = defer_field if field.name in deferred_field_names else None
+ self.gen_field_deserializer(field, field.type, bson_object, "element",
+ field_usage_check, tenant, deserialize_fn=fn)
+
+ if first_field:
+ first_field = False
+
+ # End of for fields
+ # Generate strict check for extraneous fields
+ if struct.strict:
+ # For commands, check if this is a well known command field that the IDL parser
+ # should ignore regardless of strict mode.
+ command_predicate = None
+ if isinstance(struct, ast.Command):
+ command_predicate = "!mongo::isGenericArgument(fieldName)"
+
+ # Ditto for command replies
+ if struct.is_command_reply:
+ command_predicate = "!mongo::isGenericReply(fieldName)"
- if block_num < last_block:
- self._writer.write_empty_line()
+ with self._block('else {', '}'):
+ with self._predicate(command_predicate):
+ self._writer.write_line('ctxt.throwUnknownField(fieldName);')
+ elif not struct.unsafe_dangerous_disable_extra_field_duplicate_checks:
+ with self._else(not first_field):
+ self._writer.write_line('auto push_result = usedFieldSet.insert(fieldName);')
+ with writer.IndentedScopedBlock(
+ self._writer, 'if (MONGO_unlikely(push_result.second == false)) {',
+ '}'):
+ self._writer.write_line('ctxt.throwDuplicateField(fieldName);')
+
+ # Handle the deferred fields after their possible dependencies have been processed.
+ for field in deferred_fields:
+ element_name = _gen_field_element_name(field)
+ with self._predicate(element_name):
+ self.gen_field_deserializer(field, field.type, bson_object, element_name, None,
+ tenant)
# Parse chained structs if not inlined
# Parse chained types always here
@@ -2148,14 +2195,29 @@ def _gen_serializer_method_custom(self, field):
self._writer.write_template(
'BSONArrayBuilder arrayBuilder(builder->subarrayStart(${field_name}));')
with self._block('for (const auto& item : ${access_member}) {', '}'):
- expression = bson_cpp_type.gen_serializer_expression(self._writer, 'item')
+ expression = bson_cpp_type.gen_serializer_expression(
+ self._writer, 'item',
+ field.query_shape == ast.QueryShapeFieldType.CUSTOM)
template_params['expression'] = expression
self._writer.write_template('arrayBuilder.append(${expression});')
else:
expression = bson_cpp_type.gen_serializer_expression(
- self._writer, _access_member(field))
+ self._writer, _access_member(field),
+ field.query_shape == ast.QueryShapeFieldType.CUSTOM)
template_params['expression'] = expression
- self._writer.write_template('builder->append(${field_name}, ${expression});')
+ if not field.should_serialize_with_options:
+ self._writer.write_template(
+ 'builder->append(${field_name}, ${expression});')
+ elif field.query_shape == ast.QueryShapeFieldType.LITERAL:
+ self._writer.write_template(
+ 'options.serializeLiteral(${expression}).serializeForIDL(${field_name}, builder);'
+ )
+ elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE:
+ self._writer.write_template(
+ 'builder->append(${field_name}, options.serializeFieldPathFromString(${expression}));'
+ )
+ else:
+ assert False
elif field.type.bson_serialization_type[0] == 'any':
# Any types are special
@@ -2227,18 +2289,29 @@ def _gen_serializer_method_struct(self, field):
if field.chained:
# Just directly call the serializer for chained structs without opening up a nested
# document.
- self._writer.write_template('${access_member}.serialize(builder);')
+ if not field.should_serialize_with_options:
+ self._writer.write_template('${access_member}.serialize(builder);')
+ else:
+ self._writer.write_template('${access_member}.serialize(builder, options);')
+
elif field.type.is_array:
self._writer.write_template(
'BSONArrayBuilder arrayBuilder(builder->subarrayStart(${field_name}));')
with self._block('for (const auto& item : ${access_member}) {', '}'):
self._writer.write_line(
'BSONObjBuilder subObjBuilder(arrayBuilder.subobjStart());')
- self._writer.write_line('item.serialize(&subObjBuilder);')
+ if not field.should_serialize_with_options:
+ self._writer.write_line('item.serialize(&subObjBuilder);')
+ else:
+ self._writer.write_line('item.serialize(&subObjBuilder, options);')
else:
self._writer.write_template(
'BSONObjBuilder subObjBuilder(builder->subobjStart(${field_name}));')
- self._writer.write_template('${access_member}.serialize(&subObjBuilder);')
+ if not field.should_serialize_with_options:
+ self._writer.write_template('${access_member}.serialize(&subObjBuilder);')
+ else:
+ self._writer.write_template(
+ '${access_member}.serialize(&subObjBuilder, options);')
def _gen_serializer_method_array_variant(self, field):
template_params = {
@@ -2276,19 +2349,46 @@ def _gen_serializer_method_variant_helper(self, field, template_params, builder=
template_params[
'cpp_type'] = 'std::vector<' + variant_type.cpp_type + '>' if variant_type.is_array else variant_type.cpp_type
- with self._block('[%s](const ${cpp_type}& value) {' % builder, '},'):
+ template_params['param_opt'] = ""
+ if field.should_serialize_with_options:
+ template_params['param_opt'] = ', options'
+ with self._block('[%s${param_opt}](const ${cpp_type}& value) {' % builder, '},'):
bson_cpp_type = cpp_types.get_bson_cpp_type(variant_type)
if field.type.is_variant and field.type.is_array:
self._writer.write_template('value.serialize(%s);' % builder)
elif bson_cpp_type and bson_cpp_type.has_serializer():
assert not field.type.is_array
- expression = bson_cpp_type.gen_serializer_expression(self._writer, 'value')
+ expression = bson_cpp_type.gen_serializer_expression(
+ self._writer, 'value',
+ field.query_shape == ast.QueryShapeFieldType.CUSTOM)
template_params['expression'] = expression
- self._writer.write_template(
- 'builder->append(${field_name}, ${expression});')
+ if not field.should_serialize_with_options:
+ self._writer.write_template(
+ 'builder->append(${field_name}, ${expression});')
+ elif field.query_shape == ast.QueryShapeFieldType.LITERAL:
+ self._writer.write_template(
+ 'options.serializeLiteral(${expression}).serializeForIDL(${field_name}, builder);'
+ )
+ elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE:
+ self._writer.write_template(
+ 'builder->append(${field_name}, options.serializeFieldPathFromString(${expression}));'
+ )
+ else:
+ assert False
else:
- self._writer.write_template(
- 'idl::idlSerialize(builder, ${field_name}, value);')
+ if not field.should_serialize_with_options:
+ self._writer.write_template(
+ 'idl::idlSerialize(builder, ${field_name}, value);')
+ elif field.query_shape == ast.QueryShapeFieldType.LITERAL:
+ self._writer.write_template(
+ 'options.serializeLiteral(value).serializeForIDL(${field_name}, builder);'
+ )
+ elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE:
+ self._writer.write_template(
+ 'idl::idlSerialize(builder, ${field_name}, options.serializeFieldPathFromString(value));'
+ )
+ else:
+ assert False
def _gen_serializer_method_common(self, field):
# type: (ast.Field) -> None
@@ -2320,11 +2420,27 @@ def _gen_serializer_method_common(self, field):
else:
self._gen_serializer_method_variant(field)
else:
- # Generate default serialization using BSONObjBuilder::append
- # Note: BSONObjBuilder::append has overrides for std::vector also
- self._writer.write_line(
- 'builder->append(%s, %s);' % (_get_field_constant_name(field),
- _access_member(field)))
+ # Generate default serialization
+ # Note: BSONObjBuilder::append, which all three branches use, has overrides for std::vector also
+ if not field.should_serialize_with_options:
+ self._writer.write_line(
+ 'builder->append(%s, %s);' % (_get_field_constant_name(field),
+ _access_member(field)))
+ elif field.query_shape == ast.QueryShapeFieldType.LITERAL:
+ # serializeLiteral expects an ImplicitValue, which can't be constructed with an int64_t
+ expression_cast = ""
+ if field.type.cpp_type == "std::int64_t":
+ expression_cast = "(long long)"
+ self._writer.write_line(
+ 'options.serializeLiteral(%s%s).serializeForIDL(%s, builder);'
+ % (expression_cast, _access_member(field),
+ _get_field_constant_name(field)))
+ elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE:
+ self._writer.write_line(
+ 'builder->append(%s, options.serializeFieldPathFromString(%s));' %
+ (_get_field_constant_name(field), _access_member(field)))
+ else:
+ assert False
else:
self._gen_serializer_method_struct(field)
@@ -2603,8 +2719,8 @@ def gen_field_list_entries_declaration_struct(self, struct):
common.template_args('// Map: fieldName -> ${should_forward_name}',
should_forward_name=field_list_info.get_should_forward_name()))
block_name = common.template_args(
- 'const stdx::unordered_map<std::string, bool> ${klass}::_genericFields {', klass=klass)
- with self._block(block_name, "};"):
+ 'const StaticImmortal<StringMap<bool>> ${klass}::_genericFields {{', klass=klass)
+ with self._block(block_name, "}};"):
sorted_entries = sorted(struct.fields, key=lambda f: f.name)
for entry in sorted_entries:
self._writer.write_line(
@@ -2724,7 +2840,8 @@ def gen_server_parameters(self, params, header_file_name):
self._writer.write_line(
'%s %s%s;' % (param.cpp_vartype, param.cpp_varname, init))
- blockname = 'idl_' + hashlib.sha1(header_file_name.encode()).hexdigest()
+ blockname = 'idl_' + \
+ hashlib.sha1(header_file_name.encode()).hexdigest()
with self._block('MONGO_SERVER_PARAMETER_REGISTER(%s)(InitializerContext*) {' % (blockname),
'}'):
# ServerParameter instances.
@@ -2964,11 +3081,16 @@ def generate(self, spec, header_file_name):
# Generate mongo includes third
header_list = [
- 'mongo/bson/bsonobjbuilder.h', 'mongo/db/auth/authorization_contract.h',
- 'mongo/db/commands.h', 'mongo/idl/command_generic_argument.h',
- 'mongo/util/overloaded_visitor.h'
+ 'mongo/util/overloaded_visitor.h',
+ 'mongo/util/string_map.h',
]
+ if spec.commands:
+ header_list.append('mongo/db/auth/authorization_contract.h')
+ header_list.append('mongo/idl/command_generic_argument.h')
+ elif len([s for s in spec.structs if s.is_command_reply]) > 0:
+ header_list.append('mongo/idl/command_generic_argument.h')
+
if spec.server_parameters:
header_list.append('mongo/db/server_parameter.h')
header_list.append('mongo/db/server_parameter_with_storage.h')
diff --git a/buildscripts/idl/idl/parser.py b/buildscripts/idl/idl/parser.py
index a5f74e3b8c64c..41b73966d267f 100644
--- a/buildscripts/idl/idl/parser.py
+++ b/buildscripts/idl/idl/parser.py
@@ -132,6 +132,8 @@ def _generic_parser(
if ctxt.is_mapping_node(second_node, first_name):
syntax_node.__dict__[first_name] = rule_desc.mapping_parser_func(
ctxt, second_node)
+ elif rule_desc.node_type == "required_bool_scalar":
+ syntax_node.__dict__[first_name] = ctxt.get_required_bool(second_node)
else:
raise errors.IDLError(
"Unknown node_type '%s' for parser rule" % (rule_desc.node_type))
@@ -147,7 +149,7 @@ def _generic_parser(
# A bool is never "None" like other types, it simply defaults to "false".
# It means "if bool is None" will always return false and there is no support for required
- # 'bool' at this time.
+ # 'bool' at this time. Use the node type 'required_bool_scalar' if this behavior is not desired.
if not rule_desc.node_type == 'bool_scalar':
if syntax_node.__dict__[name] is None:
ctxt.add_missing_required_field_error(node, syntax_node_name, name)
@@ -404,6 +406,8 @@ def _parse_field(ctxt, name, node):
_RuleDesc('bool_scalar'),
"forward_from_shards":
_RuleDesc('bool_scalar'),
+ "query_shape":
+ _RuleDesc('scalar'),
})
return field
@@ -575,6 +579,8 @@ def _parse_struct(ctxt, spec, name, node):
"cpp_validator_func": _RuleDesc('scalar'),
"is_command_reply": _RuleDesc('bool_scalar'),
"is_generic_cmd_list": _RuleDesc('scalar'),
+ "query_shape_component": _RuleDesc('bool_scalar'),
+ "unsafe_dangerous_disable_extra_field_duplicate_checks": _RuleDesc("bool_scalar"),
})
# PyLint has difficulty with some iterables: https://github.com/PyCQA/pylint/issues/3105
@@ -978,6 +984,9 @@ def _parse_feature_flag(ctxt, spec, name, node):
mapping_parser_func=_parse_expression),
"version":
_RuleDesc('scalar'),
+ "shouldBeFCVGated":
+ _RuleDesc('scalar_or_mapping', _RuleDesc.REQUIRED,
+ mapping_parser_func=_parse_expression),
})
spec.feature_flags.append(param)
diff --git a/buildscripts/idl/idl/struct_types.py b/buildscripts/idl/idl/struct_types.py
index ab784420374f9..f2a4b69a8b6f7 100644
--- a/buildscripts/idl/idl/struct_types.py
+++ b/buildscripts/idl/idl/struct_types.py
@@ -327,14 +327,19 @@ def get_deserializer_method(self):
def get_serializer_method(self):
# type: () -> MethodInfo
+ args = ['BSONObjBuilder* builder']
+ if self._struct.query_shape_component:
+ args.append("SerializationOptions options = {}")
return MethodInfo(
- common.title_case(self._struct.cpp_name), 'serialize', ['BSONObjBuilder* builder'],
- 'void', const=True)
+ common.title_case(self._struct.cpp_name), 'serialize', args, 'void', const=True)
def get_to_bson_method(self):
# type: () -> MethodInfo
+ args = []
+ if self._struct.query_shape_component:
+ args.append("SerializationOptions options = {}")
return MethodInfo(
- common.title_case(self._struct.cpp_name), 'toBSON', [], 'BSONObj', const=True)
+ common.title_case(self._struct.cpp_name), 'toBSON', args, 'BSONObj', const=True)
def get_op_msg_request_serializer_method(self):
# type: () -> Optional[MethodInfo]
@@ -580,7 +585,14 @@ def gen_namespace_check(self, indented_writer, db_name, element):
indented_writer.write_line('invariant(_nss.isEmpty());')
allow_global = 'true' if self._struct.allow_global_collection_name else 'false'
indented_writer.write_line(
- '_nss = ctxt.parseNSCollectionRequired(%s, %s, %s);' % (db_name, element, allow_global))
+ 'auto collectionName = ctxt.checkAndAssertCollectionName(%s, %s);' % (element,
+ allow_global))
+ indented_writer.write_line(
+ '_nss = NamespaceStringUtil::parseNamespaceFromRequest(%s, collectionName);' %
+ (db_name))
+ indented_writer.write_line(
+ 'uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace specified: "'
+ ' << _nss.toStringForErrorMsg(), _nss.isValid());')
class _CommandWithUUIDNamespaceTypeInfo(_CommandBaseTypeInfo):
@@ -652,8 +664,15 @@ def gen_serializer(self, indented_writer):
def gen_namespace_check(self, indented_writer, db_name, element):
# type: (writer.IndentedTextWriter, str, str) -> None
- indented_writer.write_line('invariant(_nssOrUUID.nss() || _nssOrUUID.uuid());')
- indented_writer.write_line('_nssOrUUID = ctxt.parseNsOrUUID(%s, %s);' % (db_name, element))
+ indented_writer.write_line(
+ 'auto collOrUUID = ctxt.checkAndAssertCollectionNameOrUUID(%s);' % (element))
+ indented_writer.write_line(
+ '_nssOrUUID = stdx::holds_alternative(collOrUUID) ? NamespaceStringUtil::parseNamespaceFromRequest(%s, stdx::get(collOrUUID)) : NamespaceStringOrUUID(%s, stdx::get(collOrUUID));'
+ % (db_name, db_name))
+ indented_writer.write_line(
+ 'uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace specified: "'
+ ' << _nssOrUUID.toStringForErrorMsg()'
+ ', !_nssOrUUID.isNamespaceString() || _nssOrUUID.nss().isValid());')
def get_struct_info(struct):
diff --git a/buildscripts/idl/idl/syntax.py b/buildscripts/idl/idl/syntax.py
index e774a07cdad4d..c8e4697525b03 100644
--- a/buildscripts/idl/idl/syntax.py
+++ b/buildscripts/idl/idl/syntax.py
@@ -493,6 +493,10 @@ def __init__(self, file_name, line, column):
self.serialize_op_msg_request_only = False # type: bool
self.constructed = False # type: bool
+ self.query_shape = None # type: Optional[str]
+
+ self.hidden = False # type: bool
+
super(Field, self).__init__(file_name, line, column)
@@ -552,6 +556,8 @@ def __init__(self, file_name, line, column):
self.cpp_validator_func = None # type: str
self.is_command_reply = False # type: bool
self.is_generic_cmd_list = None # type: Optional[str]
+ # pylint: disable=invalid-name
+ self.unsafe_dangerous_disable_extra_field_duplicate_checks = None # type: bool
# Command only property
self.cpp_name = None # type: str
@@ -563,6 +569,8 @@ def __init__(self, file_name, line, column):
# Internal property: cpp_namespace from globals section
self.cpp_namespace = None # type: str
+ self.query_shape_component = False # type: bool
+
super(Struct, self).__init__(file_name, line, column)
@@ -877,6 +885,8 @@ def __init__(self, file_name, line, column):
self.cpp_varname = None # type: str
self.default = None # type: Expression
self.version = None # type: str
+ # pylint: disable=C0103
+ self.shouldBeFCVGated = None # type: Expression
super(FeatureFlag, self).__init__(file_name, line, column)
diff --git a/buildscripts/idl/idl_check_compatibility.py b/buildscripts/idl/idl_check_compatibility.py
index 2b40867519f9c..8d21fb3a31dc6 100644
--- a/buildscripts/idl/idl_check_compatibility.py
+++ b/buildscripts/idl/idl_check_compatibility.py
@@ -226,7 +226,6 @@
'aggregate-param-needsMerge',
'aggregate-param-fromMongos',
# Bulk fixes for fields that are strictly internal all along and should thus be marked unstable.
- 'aggregate-param-$_generateV2ResumeTokens',
'endSessions-param-txnNumber',
'endSessions-param-txnUUID',
'findAndModify-param-stmtId',
@@ -320,7 +319,6 @@
'update-param-isTimeseriesNamespace',
'delete-param-isTimeseriesNamespace',
'findAndModify-param-stmtId',
- 'aggregate-param-$_generateV2ResumeTokens',
'hello-param-loadBalanced',
'hello-reply-serviceId',
'hello-reply-isImplicitDefaultMajorityWC',
diff --git a/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl
index 157ba6cb002cf..2fcf36675157d 100644
--- a/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl
+++ b/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl
@@ -52,4 +52,4 @@ generic_reply_field_lists:
description: "IDL checker provides no guarantees about unstable generic reply fields"
fields:
unstableGenericReplyField:
- forward_from_shards: false
\ No newline at end of file
+ forward_from_shards: false
diff --git a/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl
index 8220162ccfadc..4f21a7423c646 100644
--- a/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl
+++ b/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl
@@ -56,4 +56,4 @@ generic_reply_field_lists:
description: "IDL checker provides no guarantees about unstable generic reply fields"
fields:
unstableGenericReplyField:
- forward_from_shards: false
\ No newline at end of file
+ forward_from_shards: false
diff --git a/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl
index 8aceb00800434..150257b05c917 100644
--- a/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl
+++ b/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl
@@ -58,4 +58,4 @@ generic_reply_field_lists:
Removing an unstable reply field should still pass"
fields:
unstableGenericReplyField:
- forward_from_shards: false
\ No newline at end of file
+ forward_from_shards: false
diff --git a/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl
index 6f4b37df1685e..bd3d5a670316a 100644
--- a/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl
+++ b/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl
@@ -59,4 +59,4 @@ generic_reply_field_lists:
unstableGenericReplyField:
forward_from_shards: false
removedUnstableGenericReplyField:
- forward_from_shards: false
\ No newline at end of file
+ forward_from_shards: false
diff --git a/buildscripts/idl/tests/test_binder.py b/buildscripts/idl/tests/test_binder.py
index 9de1957515925..e52c922795013 100644
--- a/buildscripts/idl/tests/test_binder.py
+++ b/buildscripts/idl/tests/test_binder.py
@@ -131,13 +131,29 @@ def test_global_positive(self):
spec = self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
cpp_includes:
- 'bar'
- 'foo'"""))
- self.assertEqual(spec.globals.cpp_namespace, "something")
+ self.assertEqual(spec.globals.cpp_namespace, "mongo")
self.assertListEqual(spec.globals.cpp_includes, ['bar', 'foo'])
+ spec = self.assert_bind(
+ textwrap.dedent("""
+ global:
+ cpp_namespace: 'mongo::nested'
+ """))
+ self.assertEqual(spec.globals.cpp_namespace, "mongo::nested")
+
+ def test_global_negatives(self):
+ # type: () -> None
+ """Postive global tests."""
+ self.assert_bind_fail(
+ textwrap.dedent("""
+ global:
+ cpp_namespace: 'something'
+ """), idl.errors.ERROR_ID_BAD_CPP_NAMESPACE)
+
def test_type_positive(self):
# type: () -> None
"""Positive type tests."""
@@ -1587,6 +1603,18 @@ def test_enum_positive(self):
v3: 2
"""))
+ # Test int - non continuous
+ self.assert_bind(
+ textwrap.dedent("""
+ enums:
+ foo:
+ description: foo
+ type: int
+ values:
+ v1: 0
+ v3: 2
+ """))
+
# Test string
self.assert_bind(
textwrap.dedent("""
@@ -1615,18 +1643,6 @@ def test_enum_negative(self):
v1: 0
"""), idl.errors.ERROR_ID_ENUM_BAD_TYPE)
- # Test int - non continuous
- self.assert_bind_fail(
- textwrap.dedent("""
- enums:
- foo:
- description: foo
- type: int
- values:
- v1: 0
- v3: 2
- """), idl.errors.ERROR_ID_ENUM_NON_CONTINUOUS_RANGE)
-
# Test int - dups
self.assert_bind_fail(
textwrap.dedent("""
@@ -2335,7 +2351,7 @@ def test_feature_flag(self):
# type: () -> None
"""Test feature flag checks around version."""
- # feature flag can default to false without a version
+ # feature flag can default to false without a version (shouldBeFCVGated can be true or false)
self.assert_bind(
textwrap.dedent("""
feature_flags:
@@ -2343,9 +2359,20 @@ def test_feature_flag(self):
description: "Make toast"
cpp_varname: gToaster
default: false
+ shouldBeFCVGated: false
"""))
- # feature flag can default to true with a version
+ self.assert_bind(
+ textwrap.dedent("""
+ feature_flags:
+ featureFlagToaster:
+ description: "Make toast"
+ cpp_varname: gToaster
+ default: false
+ shouldBeFCVGated: true
+ """))
+
+ # if shouldBeFCVGated: true, feature flag can default to true with a version
self.assert_bind(
textwrap.dedent("""
feature_flags:
@@ -2354,9 +2381,21 @@ def test_feature_flag(self):
cpp_varname: gToaster
default: true
version: 123
+ shouldBeFCVGated: true
+ """))
+
+ # if shouldBeFCVGated: false, we do not need a version
+ self.assert_bind(
+ textwrap.dedent("""
+ feature_flags:
+ featureFlagToaster:
+ description: "Make toast"
+ cpp_varname: gToaster
+ default: true
+ shouldBeFCVGated: false
"""))
- # true is only allowed with a version
+ # if shouldBeFCVGated: true and default: true, a version is required
self.assert_bind_fail(
textwrap.dedent("""
feature_flags:
@@ -2364,9 +2403,22 @@ def test_feature_flag(self):
description: "Make toast"
cpp_varname: gToaster
default: true
+ shouldBeFCVGated: true
"""), idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION)
- # false is not allowed with a version
+ # false is not allowed with a version and shouldBeFCVGated: true
+ self.assert_bind_fail(
+ textwrap.dedent("""
+ feature_flags:
+ featureFlagToaster:
+ description: "Make toast"
+ cpp_varname: gToaster
+ default: false
+ version: 123
+ shouldBeFCVGated: true
+ """), idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION)
+
+ # false is not allowed with a version and shouldBeFCVGated: false
self.assert_bind_fail(
textwrap.dedent("""
feature_flags:
@@ -2375,8 +2427,21 @@ def test_feature_flag(self):
cpp_varname: gToaster
default: false
version: 123
+ shouldBeFCVGated: false
"""), idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION)
+ # if shouldBeFCVGated is false, a version is not allowed
+ self.assert_bind_fail(
+ textwrap.dedent("""
+ feature_flags:
+ featureFlagToaster:
+ description: "Make toast"
+ cpp_varname: gToaster
+ default: true
+ version: 123
+ shouldBeFCVGated: false
+ """), idl.errors.ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION)
+
def test_access_check(self):
# type: () -> None
"""Test access check."""
@@ -2659,6 +2724,234 @@ def test_access_check_negative(self):
reply_type: reply
"""), idl.errors.ERROR_ID_MISSING_ACCESS_CHECK)
+ def test_query_shape_component_validation(self):
+ # type: () -> None
+ """Test binder validation for query_shape_component structs and query_shape field properties."""
+ self.assert_bind(self.common_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: literal
+ type: string
+ field2:
+ type: bool
+ query_shape: parameter
+ """))
+
+ self.assert_bind_fail(
+ self.common_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ type: string
+ field2:
+ type: bool
+ query_shape: parameter
+ """), idl.errors.ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL)
+
+ self.assert_bind_fail(
+ self.common_types + textwrap.dedent("""
+ structs:
+ struct1:
+ strict: true
+ description: ""
+ fields:
+ field1:
+ type: string
+ field2:
+ type: bool
+ query_shape: parameter
+ """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL)
+
+ # Validating query_shape_anonymize relies on std::string
+ basic_types = textwrap.dedent("""
+ types:
+ string:
+ bson_serialization_type: string
+ description: "A BSON UTF-8 string"
+ cpp_type: "std::string"
+ deserializer: "mongo::BSONElement::str"
+ bool:
+ bson_serialization_type: bool
+ description: "A BSON bool"
+ cpp_type: "bool"
+ deserializer: "mongo::BSONElement::boolean"
+ serialization_context:
+ bson_serialization_type: any
+ description: foo
+ cpp_type: foo
+ internal_only: true
+ """)
+ self.assert_bind(basic_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: anonymize
+ type: string
+ field2:
+ query_shape: parameter
+ type: bool
+ """))
+
+ self.assert_bind(basic_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: anonymize
+ type: array
+ field2:
+ query_shape: parameter
+ type: bool
+ """))
+
+ self.assert_bind_fail(
+ basic_types + textwrap.dedent("""
+ structs:
+ struct1:
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: blah
+ type: string
+ """), idl.errors.ERROR_ID_QUERY_SHAPE_INVALID_VALUE)
+
+ self.assert_bind_fail(
+ basic_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: anonymize
+ type: bool
+ field2:
+ query_shape: parameter
+ type: bool
+ """), idl.errors.ERROR_ID_INVALID_TYPE_FOR_SHAPIFY)
+
+ self.assert_bind_fail(
+ basic_types + textwrap.dedent("""
+ structs:
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: anonymize
+ type: array
+ field2:
+ query_shape: parameter
+ type: bool
+ """), idl.errors.ERROR_ID_INVALID_TYPE_FOR_SHAPIFY)
+
+ self.assert_bind_fail(
+ basic_types + textwrap.dedent("""
+ structs:
+ StructZero:
+ strict: true
+ description: ""
+ fields:
+ field1:
+ query_shape: literal
+ type: string
+ """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL)
+
+ self.assert_bind_fail(
+ basic_types + textwrap.dedent("""
+ structs:
+ StructZero:
+ strict: true
+ description: ""
+ fields:
+ field1:
+ type: string
+ struct1:
+ query_shape_component: true
+ strict: true
+ description: ""
+ fields:
+ field2:
+ type: StructZero
+ description: ""
+ query_shape: literal
+ """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL)
+
+ # pylint: disable=invalid-name
+ def test_struct_unsafe_dangerous_disable_extra_field_duplicate_checks_negative(self):
+ # type: () -> None
+ """Negative struct tests for unsafe_dangerous_disable_extra_field_duplicate_checks."""
+
+ # Setup some common types
+ test_preamble = self.common_types + \
+ textwrap.dedent("""
+ structs:
+ danger:
+ description: foo
+ strict: false
+ unsafe_dangerous_disable_extra_field_duplicate_checks: true
+ fields:
+ foo: string
+ """)
+
+ # Test strict and unsafe_dangerous_disable_extra_field_duplicate_checks are not allowed
+ self.assert_bind_fail(
+ test_preamble + indent_text(
+ 1,
+ textwrap.dedent("""
+ danger1:
+ description: foo
+ strict: true
+ unsafe_dangerous_disable_extra_field_duplicate_checks: true
+ fields:
+ foo: string
+ """)), idl.errors.ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED)
+
+ # Test inheritance is prohibited through structs
+ self.assert_bind_fail(
+ test_preamble + indent_text(
+ 1,
+ textwrap.dedent("""
+ danger2:
+ description: foo
+ strict: true
+ fields:
+ foo: string
+ d1: danger
+ """)), idl.errors.ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED)
+
+ # Test inheritance is prohibited through commands
+ self.assert_bind_fail(
+ test_preamble + textwrap.dedent("""
+ commands:
+ dangerc:
+ description: foo
+ namespace: ignored
+ command_name: dangerc
+ strict: false
+ api_version: ""
+ fields:
+ foo: string
+ d1: danger
+ """), idl.errors.ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED)
+
if __name__ == '__main__':
diff --git a/buildscripts/idl/tests/test_compatibility.py b/buildscripts/idl/tests/test_compatibility.py
index 97f08f5075df3..1805355a76e37 100644
--- a/buildscripts/idl/tests/test_compatibility.py
+++ b/buildscripts/idl/tests/test_compatibility.py
@@ -32,6 +32,7 @@
import unittest
import sys
from os import path
+
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import idl_check_compatibility # noqa: E402 pylint: disable=wrong-import-position
diff --git a/buildscripts/idl/tests/test_import.py b/buildscripts/idl/tests/test_import.py
index 46f7df2eb28ae..0bd3e97007cd5 100644
--- a/buildscripts/idl/tests/test_import.py
+++ b/buildscripts/idl/tests/test_import.py
@@ -110,7 +110,7 @@ def test_import_positive(self):
"basetypes.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
types:
string:
@@ -174,7 +174,7 @@ def test_import_positive(self):
"cycle1a.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "cycle1b.idl"
@@ -204,7 +204,7 @@ def test_import_positive(self):
"cycle1b.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "cycle1a.idl"
@@ -227,7 +227,7 @@ def test_import_positive(self):
"cycle2.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "cycle2.idl"
@@ -254,7 +254,7 @@ def test_import_positive(self):
self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "basetypes.idl"
@@ -271,7 +271,7 @@ def test_import_positive(self):
self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "recurse2.idl"
@@ -290,7 +290,7 @@ def test_import_positive(self):
self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "recurse2.idl"
@@ -311,7 +311,7 @@ def test_import_positive(self):
self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "cycle1a.idl"
@@ -329,7 +329,7 @@ def test_import_positive(self):
self.assert_bind(
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
imports:
- "cycle2.idl"
@@ -350,7 +350,7 @@ def test_import_negative(self):
"basetypes.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
types:
string:
@@ -380,7 +380,7 @@ def test_import_negative(self):
"bug.idl":
textwrap.dedent("""
global:
- cpp_namespace: 'something'
+ cpp_namespace: 'mongo'
types:
bool:
diff --git a/buildscripts/idl/tests/test_parser.py b/buildscripts/idl/tests/test_parser.py
index 7caca54b07f56..91aa7d4ce2a81 100644
--- a/buildscripts/idl/tests/test_parser.py
+++ b/buildscripts/idl/tests/test_parser.py
@@ -1591,6 +1591,17 @@ def test_feature_flag(self):
featureFlagToaster:
description: "Make toast"
cpp_varname: gToaster
+ shouldBeFCVGated: true
+ """), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD)
+
+ # Missing shouldBeFCVGated
+ self.assert_parse_fail(
+ textwrap.dedent("""
+ feature_flags:
+ featureFlagToaster:
+ description: "Make toast"
+ cpp_varname: gToaster
+ default: false
"""), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD)
def _test_field_list(self, field_list_name, should_forward_name):
@@ -1982,6 +1993,24 @@ def test_access_checks_negative(self):
reply_type: foo_reply_struct
"""), idl.errors.ERROR_ID_EMPTY_ACCESS_CHECK)
+ # pylint: disable=invalid-name
+ def test_struct_unsafe_dangerous_disable_extra_field_duplicate_checks_negative(self):
+ # type: () -> None
+ """Negative parser tests for unsafe_dangerous_disable_extra_field_duplicate_checks."""
+
+ # Test commands and unsafe_dangerous_disable_extra_field_duplicate_checks are disallowed
+ self.assert_parse_fail(
+ textwrap.dedent("""
+ commands:
+ dangerc:
+ description: foo
+ namespace: ignored
+ command_name: dangerc
+ api_version: ""
+ strict: false
+ unsafe_dangerous_disable_extra_field_duplicate_checks: true
+ fields:
+ foo: string
+ """), idl.errors.ERROR_ID_UNKNOWN_NODE)
+
if __name__ == '__main__':
diff --git a/buildscripts/iwyu/README.md b/buildscripts/iwyu/README.md
new file mode 100644
index 0000000000000..2e925d7500a27
--- /dev/null
+++ b/buildscripts/iwyu/README.md
@@ -0,0 +1,64 @@
+# IWYU Analysis tool
+
+This tool will run
+[include-what-you-use](https://github.com/include-what-you-use/include-what-you-use)
+(IWYU) analysis across the codebase via `compile_commands.json`.
+
+The `iwyu_config.yml` file holds the current IWYU options and the automatic
+pragma-marking rules. You can also exclude files from the analysis here.
+
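+For example, additional paths can be excluded by extending the `skip_files`
+list in `iwyu_config.yml`; the second entry below is a hypothetical path,
+shown only for illustration:
+
+```
+skip_files:
+ - 'src/third_party'
+ - 'src/mongo/some_generated_dir' # hypothetical example entry
+```
+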
+The tool has two main modes of operation: `fix` and `check`. `fix` mode
+attempts to change the source files based on IWYU's suggestions. `check`
+mode simply checks whether there are any suggestions at all.
+
+`fix` mode takes a long time to run, as the tool needs to recompile any
+source file whose underlying headers were changed to ensure nothing is
+broken, and therefore it ends up recompiling the codebase several times over.
+
+For more information please refer to the script's `--help` option.
+
+# Example usage:
+
+First you must generate the `compile_commands.json` file via this command:
+
+```
+python3 buildscripts/scons.py --build-profile=compiledb compiledb
+```
+
+Next you can run the analysis:
+
+```
+python3 buildscripts/iwyu/run_iwyu_analysis.py
+```
+The default mode is `fix` mode, and it will start making changes to the code
+if any changes are found.
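+
+If you only want to know whether IWYU would suggest any changes (for example
+in CI), pass the `--check` flag instead; it applies no fixes and exits 0 when
+no new changes are detected:
+
+```
+python3 buildscripts/iwyu/run_iwyu_analysis.py --check
+```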
+
+# Debugging failures
+
+Occasionally the IWYU tool will run into problems where it is unable to
+suggest valid changes, and its changes will cause things to break (not
+compile). When it hits a failure it will copy the source and all the headers
+that were used at the time of the compilation into a directory where the same
+command can be run to reproduce the error.
+
+You can examine the suggested changes in the source and headers and compare
+them to the working source tree. Then you can make corrective changes to
+allow IWYU to get past the failure.
+
+IWYU is not perfect and it makes mistakes that a human can recognize and fix
+appropriately.
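+
+As a hint for where to look (the path below is hypothetical, shown only for
+illustration): `copy_error_state()` in `run_iwyu_analysis.py` copies the
+failing state into a directory named after the compile command's output file
+with an `.iwyu_test_dir` suffix, so a failing `build/.../foo.o` would leave
+its reproduction directory at `build/.../foo.iwyu_test_dir`.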
+
+# Running the tests
+
+This tool includes its own end-to-end tests. The test directory contains
+subdirectories with source files and IWYU configs to run the tool against.
+The tests then compare the output to built-in expected results and fail
+if the tool does not produce the expected results.
+
+To run the tests use the command:
+
+```
+cd buildscripts/iwyu/test
+python3 run_tests.py
+```
diff --git a/buildscripts/iwyu/iwyu_config.yml b/buildscripts/iwyu/iwyu_config.yml
new file mode 100644
index 0000000000000..56a997d626fa5
--- /dev/null
+++ b/buildscripts/iwyu/iwyu_config.yml
@@ -0,0 +1,83 @@
+# options passed to IWYU
+iwyu_options:
+ - '--mapping_file=etc/iwyu_mapping.imp'
+ - '--no_fwd_decls'
+ - '--prefix_header_includes=add'
+ - '--transitive_includes_only'
+
+# options passed to the fix script
+fix_options:
+ - '--blank_lines'
+ - '--nocomments'
+ - '--noreorder'
+ - '--separate_project_includes=mongo'
+ - '--safe_headers'
+ - '--only_re=^src/mongo\/.*'
+ # TODO SERVER-77051 we will eventually turn this on once our codebase has been cleaned up.
+ # - '--nosafe_headers'
+
+# filename regex to swap no_include in place
+# quotes and brackets not included in this config
+# since this is targeting IWYU added headers
+no_includes:
+ # avoid boost craziness
+ - 'boost/.+/detail/.+'
+ - 'asio/impl/.+'
+ - 'boost/.+\.ipp'
+ # avoid stdlib detail headers
+ - 'ext/alloc_traits\.h'
+ - 'ext/type_traits\.h'
+ - 'cxxabi\.h' # https://github.com/include-what-you-use/include-what-you-use/issues/909
+ - 'bits/.+'
+ - 'syscall\.h'
+ # arch specific
+ - 'boost/predef/hardware/simd/x86.+'
+ - 'emmintrin\.h'
+ # we use a third-party format library, which confuses IWYU
+ - 'format\.h'
+ # this is a link time symbol overloading thing not meant to be included
+ - 'libunwind-x86_64\.h'
+ # abuse of preprocessor
+ - 'mongo/db/namespace_string_reserved\.def\.h'
+
+# path prefixes (non regex) to skip
+skip_files:
+ - 'src/third_party'
+ - 'build/'
+ - 'src/mongo/tools/mongo_tidy_checks'
+ - 'src/mongo/util/net' # causes linkage issues
+ - 'src/mongo/util/text.cpp'
+ # IWYU confused on forward declares
+ - 'src/mongo/db/exec/near.cpp'
+ - 'src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp'
+ # Asio is going to need some special treatment, the headers are very finicky
+ - 'src/mongo/transport/asio'
+ # causes IWYU to crash:
+ - 'src/mongo/db/update/update_internal_node.cpp'
+ - 'src/mongo/db/update/update_array_node.cpp'
+ - 'src/mongo/db/update/update_object_node.cpp'
+ - 'src/mongo/db/update/update_array_node_test.cpp'
+ - 'src/mongo/db/update/update_object_node_test.cpp'
+ - 'src/mongo/util/options_parser/environment.cpp'
+ - 'src/mongo/util/options_parser/option_section.cpp'
+
+# regex file paths to add keep pragma
+# include quotes or angle brackets
+keep_includes:
+ - '".*\.cstruct"' # these are not true includes, but used for very large initializers
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - '' # IWYU messes up template instantiation
+ - '"mongo/rpc/object_check\.h"'
+ - '"mongo/base/init\.h"'
+ - '"mongo/scripting/mozjs/wrapconstrainedmethod\.h"'
+ - '"mongo/dbtests/dbtests\.h"' # this is due to using statements in the header
+ - '"mongo/config\.h"'
+ - '"mongo/util/overloaded_visitor\.h"'
+ - '"mongo/db/query/optimizer/node\.h"'
+ - '"mongo/util/text\.h"' # includes platform specific functions
diff --git a/buildscripts/iwyu/run_iwyu_analysis.py b/buildscripts/iwyu/run_iwyu_analysis.py
new file mode 100644
index 0000000000000..7d83144e33124
--- /dev/null
+++ b/buildscripts/iwyu/run_iwyu_analysis.py
@@ -0,0 +1,1004 @@
+#!/usr/bin/env python3
+"""
+TOOL FUNCTIONAL DESCRIPTION.
+
+Currently the tool works by running IWYU on a subset of compile_commands.json
+(the entries we care about, such as checked-in mongo source) and testing each
+change in a copy of the original source/header tree, so that other compiles
+are not affected until it passes a normal compile itself. Due to header
+dependencies we must recompile the source files to catch issues IWYU may have
+introduced with some dependent header change. Header dependencies do not form
+a DAG, so we cannot process sources in a deterministic fashion. The tool will
+loop through all the compilations until all dependents in a compilation are
+determined unchanged from the last time the compilation was performed.
+
+The general workflow used here is to run the tool until there are no changes
+(several hours on rhel-xxlarge) and fix the errors either in the tool config
+or as a manual human change in the code.
+
+TOOL TECHNICAL DESCRIPTION:
+
+Regarding the code layout, the main function sets up a thread pool executor
+and processes each source from the compile_commands. From there it runs a
+thread function which performs 5 steps (each in its own function) for
+each source file:
+
+1. Skip if deps are unchanged
+2. Get the header deps via -MMD
+3. Run IWYU
+4. Apply fixes
+5. Test compile, record new header deps if it passed
+
+The tool uses mtime and MD5 hashing to know if any header dep has changed.
+
+"""
+
+import argparse
+import json
+import subprocess
+import tempfile
+import shlex
+import os
+import re
+import concurrent.futures
+import hashlib
+import atexit
+import traceback
+import threading
+import shutil
+import signal
+import sys
+import yaml
+import enum
+from dataclasses import dataclass, asdict
+from typing import Dict, List, Any, Optional, Callable, Union, Tuple
+
+from tqdm import tqdm
+from colorama import init as colorama_init
+from colorama import Fore
+
+colorama_init()
+
+parser = argparse.ArgumentParser(description='Run include what you use and test output')
+
+parser.add_argument('--compile-commands', metavar='FILE', type=str, default='compile_commands.json',
+ help='Path to the compile commands file to use.')
+parser.add_argument(
+ '--check', action='store_true', help=
+ 'Enables check mode, which does not apply fixes and only runs to see if any files produce IWYU changes. Exit 0 if no new changes detected.'
+)
+parser.add_argument(
+ '--config-file', metavar='FILE', type=str, default="", help=
+ 'Path to the YAML config file to use; defaults to the iwyu_config.yml located next to this script.'
+)
+parser.add_argument(
+ '--iwyu-data', metavar='FILE', type=str, default='iwyu.dat',
+ help='Location of data used by IWYU, contains hash and status info about all files.')
+parser.add_argument(
+ '--keep-going', action='store_true', help=
+ 'Do not stop on errors, instead resubmit the job to try again later (after things may have been fixed elsewhere)'
+)
+parser.add_argument(
+ '--cycle-debugging', action='store_true', help=
+ 'Once a cycle has been detected, each directory tree for each step in the cycle will be saved to a .cycle directory.'
+)
+parser.add_argument('--verbose', action='store_true',
+ help='Prints more info about what is taking place.')
+parser.add_argument('--mongo-toolchain-bin-dir', type=str,
+ help='Which toolchain bin directory to use for this analysis.',
+ default='/opt/mongodbtoolchain/v4/bin')
+parser.add_argument(
+ '--start-ratio', type=float, help=
+ 'decimal value between 0 and 1 which indicates what starting ratio index of the total compile commands to run over, can not be greater than the --end-ratio.',
+ default=0.0)
+parser.add_argument(
+ '--end-ratio', type=float, help=
+ 'decimal value between 0 and 1 which indicates what ending ratio index of the total compile commands to run over, can not be less than the --start-ratio.',
+ default=1.0)
+command_line_args = parser.parse_args()
+
+# the current state of all files; contains the cmd_entry, hashes, successes
+IWYU_ANALYSIS_STATE: Dict[str, Any] = {}
+
+# the current state of cycles being tracked
+IWYU_CYCLE_STATE: Dict[str, Any] = {}
+
+hash_lookup_locks: Dict[str, threading.Lock] = {}
+mtime_hash_lookup: Dict[str, Dict[str, Any]] = {}
+
+if command_line_args.config_file:
+ config_file = command_line_args.config_file
+else:
+ config_file = os.path.join(os.path.dirname(__file__), "iwyu_config.yml")
+
+with open(config_file, "r") as stream:
+ config = yaml.safe_load(stream)
+ for key, value in config.items():
+ if value is None:
+ config[key] = []
+
+IWYU_OPTIONS = config.get('iwyu_options', [])
+IWYU_FIX_OPTIONS = config.get('fix_options', [])
+NO_INCLUDES = config.get('no_includes', [])
+KEEP_INCLUDES = config.get('keep_includes', [])
+SKIP_FILES = tuple(config.get('skip_files', []))
+CYCLE_FILES: List[str] = []
+
+
+@dataclass
+class CompileCommand:
+ """An entry from compile_commands.json."""
+
+ file: str
+ command: str
+ directory: str
+ output: str
+
+
+class ResultType(enum.Enum):
+ """
+    Descriptions of the possible result types.
+
+ ERROR: unexpected or unrecognized error cases
+ FAILED: the IWYU task for a given compile command entry failed
+ NO_CHANGE: the input header tree and source file have not changed since last time
+    NOT_RUNNING: sources on which we intentionally skip running IWYU altogether
+ RESUBMIT: the IWYU task failed, but it may work later after other header changes
+ SUCCESS: the IWYU task for a source file has succeeded
+ """
+
+ ERROR = enum.auto()
+ FAILED = enum.auto()
+ NO_CHANGE = enum.auto()
+ NOT_RUNNING = enum.auto()
+ RESUBMIT = enum.auto()
+ SUCCESS = enum.auto()
+
+
+TOOLCHAIN_DIR = command_line_args.mongo_toolchain_bin_dir
+SHUTDOWN_FLAG = False
+CLANG_INCLUDES = None
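+# every IWYU-specific option must be prefixed with '-Xiwyu' so it is passed to IWYU rather than treated as a compiler flag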
+IWYU_OPTIONS = [val for pair in zip(['-Xiwyu'] * len(IWYU_OPTIONS), IWYU_OPTIONS) for val in pair]
+if NO_INCLUDES:
+ NO_INCLUDE_REGEX = re.compile(r'^\s*#include\s+[\",<](' + '|'.join(NO_INCLUDES) + ')[\",>]')
+if KEEP_INCLUDES:
+ KEEP_INCLUDE_REGEX = re.compile(r'^\s*#include\s+(' + '|'.join(KEEP_INCLUDES) + ')')
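+# IWYU's report contains a line "The full include-list for <file>:" for each file it analyzed; used to find which files were touched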
+CHANGED_FILES_REGEX = re.compile(r"^The\sfull\sinclude-list\sfor\s(.+):$", re.MULTILINE)
+
+
+def printer(message: str) -> None:
+ """
+ Prints output as appropriate.
+
+    We don't print output if we are shutting down because the logs will
+    explode and the original error will be hard to locate.
+ """
+
+ if not SHUTDOWN_FLAG or command_line_args.verbose:
+ tqdm.write(str(message))
+
+
+def debug_printer(message: str) -> None:
+ """Print each step in the processing of IWYU."""
+
+ if command_line_args.verbose:
+ tqdm.write(str(message))
+
+
+def failed_return() -> ResultType:
+ """A common method to allow the processing to continue even after some file fails."""
+
+ if command_line_args.keep_going:
+ return ResultType.RESUBMIT
+ else:
+ return ResultType.FAILED
+
+
+def in_project_root(file: str) -> bool:
+ """
+ Return true if the file is in the project root.
+
+ This is assuming the project root is the same location
+ as the compile_commands.json file (the format of compile_commands.json
+ expects this as well).
+ """
+
+ return os.path.abspath(file).startswith(
+ os.path.abspath(os.path.dirname(command_line_args.compile_commands)))
+
+
+def copy_error_state(cmd_entry: CompileCommand, test_dir: str,
+ dir_ext: str = '.iwyu_test_dir') -> Optional[str]:
+ """
+ When we fail, we want to copy the current state of the temp dir.
+
+ This is so that the command that was used can be replicated and rerun,
+ primarily for debugging purposes.
+ """
+
+ # we never use a test_dir in check mode, since no files are copied in that mode.
+ if command_line_args.check:
+ return None
+
+    # make a directory in the output location where we can store the state of the
+    # header deps and source file the compile command was run with, deleting old results
+ base, _ = os.path.splitext(cmd_entry.output)
+ if os.path.exists(base + dir_ext):
+ shutil.rmtree(base + dir_ext)
+ os.makedirs(base + dir_ext, exist_ok=True)
+ basedir = os.path.basename(test_dir)
+ error_state_dir = os.path.join(base + dir_ext, basedir)
+ shutil.copytree(test_dir, error_state_dir)
+ return error_state_dir
+
+
+def calc_hash_of_file(file: str) -> Optional[str]:
+ """
+ Calculate the hash of a file. Use mtime as well.
+
+ If the mtime is unchanged, don't do IO, just look up the last hash.
+ """
+
+    # we need a lock per file because the GIL does not cover system IO, so two threads
+    # could otherwise be doing IO on the same file at the same time.
+ if file not in hash_lookup_locks:
+ hash_lookup_locks[file] = threading.Lock()
+ with hash_lookup_locks[file]:
+ if file in mtime_hash_lookup and os.path.getmtime(file) == mtime_hash_lookup[file]['mtime']:
+ return mtime_hash_lookup[file]['hash']
+ else:
+ try:
+ hash_val = hashlib.md5(open(file, 'rb').read()).hexdigest()
+ except FileNotFoundError:
+ return None
+
+ mtime_hash_lookup[file] = {'mtime': os.path.getmtime(file), 'hash': hash_val}
+ return hash_val
+
+
+def find_no_include(line: str, lines: List[str], output_lines: List[str]) -> bool:
+ """
+    Regex the line to see if it contains an include that matches our NO_INCLUDE_REGEX.
+
+    If so, we do not include that line when we rewrite the file, and instead
+    we add an IWYU no_include pragma in its place.
+ """
+
+ no_include_header_found = False
+ if "// IWYU pragma: keep" in line:
+ return no_include_header_found
+ no_include_header = re.findall(NO_INCLUDE_REGEX, line)
+
+ if no_include_header:
+ no_include_header_found = True
+ no_include_line = f'// IWYU pragma: no_include "{no_include_header[0]}"\n'
+ if no_include_line not in lines:
+ output_lines.append(no_include_line)
+ return no_include_header_found
+
+
+def add_pragmas(source_files: List[str]):
+ """
+ We automate some of the pragmas so there is not so much manual work.
+
+ There are general cases for some of the pragmas. In this case we open the target
+ source/header, search via regexes for specific includes we care about, then add
+ the pragma comments as necessary.
+ """
+
+ for source_file in source_files:
+
+ # before we run IWYU, we take a guess at the likely header by swapping .cpp for .h
+ # so it may not be a real header. After IWYU runs we know exactly where to add the pragmas
+ # in case we got it wrong the first time around
+ if not os.path.exists(source_file):
+ continue
+
+        # we load in the file content, operate on it, and then write it back out
+ output_lines: List[str] = []
+ with open(source_file, 'r') as fin:
+ file_lines = fin.readlines()
+ for line in file_lines:
+
+ if NO_INCLUDES and find_no_include(line, file_lines, output_lines):
+ continue
+
+ if KEEP_INCLUDES and re.search(KEEP_INCLUDE_REGEX,
+ line) and '// IWYU pragma: keep' not in line:
+
+ output_lines.append(line.strip() + " // IWYU pragma: keep\n")
+ continue
+
+ output_lines.append(line)
+
+ with open(source_file, 'w') as fout:
+ for line in output_lines:
+ fout.write(line)
+
+
+def recalc_hashes(deps: List[str], change_dir: Optional[str] = None) -> Dict[str, Any]:
+ """
+ We calculate the hashes from the header dep list generated by the compiler.
+
+    We also create a cumulative hash for convenience.
+
+    In some cases we are operating in a test directory, but deps are referenced as if they are
+    in the project root. The change_dir option here allows us to calc the hashes from
+    the test directory we may be working in, but still record the dep files in a fashion
+    compatible with other processes that work out of the project root, e.g. testing if there was a
+    change from last time.
+ """
+
+ hashes: Dict[str, Any] = {'deps': {}}
+ full_hash = hashlib.new('md5')
+ for dep in sorted(list(deps)):
+ if not in_project_root(dep):
+ continue
+ if change_dir:
+ orig_dep = dep
+ dep = os.path.join(change_dir, dep)
+ dep_hash = calc_hash_of_file(dep)
+ if dep_hash is None:
+ continue
+ if change_dir:
+ dep = orig_dep
+ full_hash.update(dep_hash.encode('utf-8'))
+ hashes['deps'][dep] = dep_hash
+ hashes['full_hash'] = full_hash.hexdigest()
+ return hashes
+
+
+def setup_test_dir(cmd_entry: CompileCommand, test_dir: str) -> List[str]:
+ """
+ Here we are copying the source and required header tree from the main source tree.
+
+    Returns the associated source and header that were copied into the test dir.
+
+    We want an isolated location to perform analysis and apply changes so everything is not
+    clashing. At this point we don't know for sure which header IWYU is going to associate with the source,
+    but for the mongo codebase, 99.9% of the time it is just swapping the .cpp for .h. We need this to apply
+    some pragmas to keep IWYU from removing headers it doesn't understand (cross platform or
+    third party like boost or asio). The pragmas are harmless in and of themselves, so adding them
+    mistakenly in the 0.1% of cases is negligible.
+ """
+
+ original_sources = [
+ orig_source for orig_source in [cmd_entry.file,
+ os.path.splitext(cmd_entry.file)[0] + '.h']
+ if os.path.exists(orig_source)
+ ]
+ test_source_files = [os.path.join(test_dir, source_file) for source_file in original_sources]
+ dep_headers = [dep for dep in IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys()]
+
+ # copy each required header from our source tree into our test dir
+ # this does cost some time, but the alternative (everything operating in the real source tree)
+    # was much slower due to constant failures.
+ for source_file in dep_headers + ['etc/iwyu_mapping.imp']:
+ if in_project_root(source_file):
+ os.makedirs(os.path.join(test_dir, os.path.dirname(source_file)), exist_ok=True)
+ shutil.copyfile(source_file, os.path.join(test_dir, source_file))
+
+ # need to create dirs for outputs
+ for output in shlex.split(cmd_entry.output):
+ os.makedirs(os.path.join(test_dir, os.path.dirname(output)), exist_ok=True)
+
+ return test_source_files
+
+
+def get_clang_includes() -> List[str]:
+ """
+ IWYU needs some extra help to know what default includes clang is going to bring in when it normally compiles.
+
+ The query reliably gets the include dirs that would be used in normal compiles. We cache and reuse the result
+ so the subprocess only runs once.
+ """
+ global CLANG_INCLUDES # pylint: disable=global-statement
+ if CLANG_INCLUDES is None:
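+        # do a no-op compile and keep only the stderr lines between "#include <...> search starts here:" and "End of search list."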
+ clang_includes = subprocess.getoutput(
+ f"{TOOLCHAIN_DIR}/clang++ -Wp,-v -x c++ - -fsyntax-only < /dev/null 2>&1 | sed -e '/^#include <...>/,/^End of search/{{ //!b }};d'"
+ ).split('\n')
+ clang_includes = ['-I' + include.strip() for include in clang_includes]
+ CLANG_INCLUDES = clang_includes
+ return CLANG_INCLUDES
+
+
+def write_cycle_diff(source_file: str, cycle_dir: str, latest_hashes: Dict[str, Any]) -> None:
+ """
+ Write out the diffs between the last iteration and the latest iteration.
+
+ The file contains the hash for before and after for each file involved in the compilation.
+ """
+
+ with open(os.path.join(cycle_dir, 'hashes_diff.txt'), 'w') as out:
+ dep_list = set(
+ list(IWYU_ANALYSIS_STATE[source_file]['hashes']['deps'].keys()) +
+ list(latest_hashes['deps'].keys()))
+ not_found_str = "not found" + (" " * 23)
+ for dep in sorted(dep_list):
+ out.write(
+ f"Original: {IWYU_ANALYSIS_STATE[source_file]['hashes']['deps'].get(dep, not_found_str)}, Latest: {latest_hashes['deps'].get(dep, not_found_str)} - {dep}\n"
+ )
+
+
+def check_for_cycles(cmd_entry: CompileCommand, latest_hashes: Dict[str, Any],
+ test_dir: str) -> Optional[ResultType]:
+ """
+ IWYU can induce cycles so we should check our previous results to see if a cycle has occurred.
+
+    These cycles can happen if a header change induces some other header change which then in turn induces
+    the original header change. These cycles are generally harmless and are easily broken with a keep
+    pragma, but finding which files induce the cycle is the challenge.
+
+ With cycle debug mode enabled, the entire header tree is saved for each iteration in the cycle so
+ all files can be fully examined.
+ """
+
+ if cmd_entry.file not in IWYU_CYCLE_STATE:
+ IWYU_CYCLE_STATE[cmd_entry.file] = {
+ 'cycles': [],
+ }
+
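+    # seeing the same cumulative dep hash twice for the same source means IWYU's edits are oscillating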
+ if latest_hashes['full_hash'] in IWYU_CYCLE_STATE[cmd_entry.file]['cycles']:
+ if command_line_args.cycle_debugging:
+ if 'debug_cycles' not in IWYU_CYCLE_STATE[cmd_entry.file]:
+ IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'] = {}
+
+ IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'][
+ latest_hashes['full_hash']] = latest_hashes
+
+ cycle_dir = copy_error_state(
+ cmd_entry, test_dir, dir_ext=
+ f".{latest_hashes['full_hash']}.cycle{len(IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'])}"
+ )
+ write_cycle_diff(cmd_entry.file, cycle_dir, latest_hashes)
+ if latest_hashes['full_hash'] not in IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles']:
+ printer(f"{Fore.YELLOW}[5] - Cycle Found!: {cmd_entry.file}{Fore.RESET}")
+ else:
+ printer(f"{Fore.RED}[5] - Cycle Done! : {cmd_entry.file}{Fore.RESET}")
+ return failed_return()
+ else:
+ printer(f"{Fore.RED}[5] - Cycle Found!: {cmd_entry.file}{Fore.RESET}")
+ CYCLE_FILES.append(cmd_entry.file)
+ return ResultType.SUCCESS
+ else:
+ IWYU_CYCLE_STATE[cmd_entry.file]['cycles'].append(latest_hashes['full_hash'])
+
+ return None
+
+
+def write_iwyu_data() -> None:
+ """Store the data we have acquired during this run so we can resume at the same spot on subsequent runs."""
+
+    # There might be faster ways to store this, like binary serialization or
+    # what not, but having human readable json is good for debugging.
+    # On a full build this takes around 10 seconds to write out.
+ if IWYU_ANALYSIS_STATE:
+ try:
+ # atomic move operation prevents ctrl+c mashing from
+ # destroying everything, at least we can keep the original
+ # data safe from emotional outbursts.
+ with tempfile.NamedTemporaryFile() as temp:
+ with open(temp.name, 'w') as iwyu_data_file:
+ json.dump(IWYU_ANALYSIS_STATE, iwyu_data_file, sort_keys=True, indent=4)
+ shutil.move(temp.name, command_line_args.iwyu_data)
+ except FileNotFoundError as exc:
+ if temp.name in str(exc):
+ pass
+
+
+def need_to_process(cmd_entry: CompileCommand,
+ custom_printer: Callable[[str], None] = printer) -> Optional[ResultType]:
+ """
+    The first step for processing a given source file.
+
+ We have a list of skip prefixes, for example build or third_party, but others can be added.
+
+ If it is a file we are not skipping, then we check if we have already done the work by calculating the
+ hashes and seeing if what we recorded last time has changed.
+ """
+
+ if cmd_entry.file.startswith(
+ SKIP_FILES) or cmd_entry.file in CYCLE_FILES or '/conftest_' in cmd_entry.file:
+ custom_printer(f"{Fore.YELLOW}[5] - Not running!: {cmd_entry.file}{Fore.RESET}")
+ return ResultType.NOT_RUNNING
+
+ if IWYU_ANALYSIS_STATE.get(cmd_entry.file):
+ hashes = recalc_hashes(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys())
+
+ # we only skip if the matching mode was successful last time, otherwise we assume we need to rerun
+ mode_success = 'CHECK' if command_line_args.check else 'FIX'
+ if command_line_args.verbose:
+ diff_files = list(
+ set(hashes['deps'].keys()).symmetric_difference(
+ set(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys())))
+ if diff_files:
+ msg = f"[1] Need to process {cmd_entry.file} because different files:\n"
+ for file in diff_files:
+ msg += f'{file}\n'
+ debug_printer(msg)
+ for file in IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys():
+ if file in hashes['deps'] and hashes['deps'][file] != IWYU_ANALYSIS_STATE[
+ cmd_entry.file]['hashes']['deps'][file]:
+ debug_printer(
+ f"[1] Need to process {cmd_entry.file} because hash changed:\n{file}: {hashes['deps'][file]}\n{file}: {IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'][file]}"
+ )
+
+ if hashes['full_hash'] == IWYU_ANALYSIS_STATE[
+ cmd_entry.file]['hashes']['full_hash'] and mode_success in IWYU_ANALYSIS_STATE[
+ cmd_entry.file].get('success', []):
+ custom_printer(f"{Fore.YELLOW}[5] - No Change! : {cmd_entry.file}{Fore.RESET}")
+ return ResultType.NO_CHANGE
+
+ return None
+
+
+def calc_dep_headers(cmd_entry: CompileCommand) -> Optional[ResultType]:
+ """
+ The second step in the IWYU process.
+
+ We need to get a list of headers which are dependencies so we can copy them to an isolated
+    working directory (so parallel IWYU changes don't break us). We switch the compile to
+    preprocessor-only mode (-E) for faster generation of the dep file.
+
+ Once we have the deps list, we parse it and calc the hashes of the deps.
+ """
+
+ try:
+ with tempfile.NamedTemporaryFile() as depfile:
+
+            # this may be the first time we are executing a real command, so make sure the
+            # output dirs exist so the compiler does not complain
+ outputs = shlex.split(cmd_entry.output)
+ for output in outputs:
+ out_dir = os.path.dirname(output)
+ if out_dir:
+ os.makedirs(out_dir, exist_ok=True)
+
+            # set up the command for fast depfile generation
+ cmd = cmd_entry.command
+ cmd += f' -MD -MF {depfile.name}'
+ cmd = cmd.replace(' -c ', ' -E ')
+ debug_printer(f"[1] - Getting Deps: {cmd_entry.file}")
+
+ try:
+ deps_proc = subprocess.run(cmd, shell=True, capture_output=True, text=True,
+ timeout=300)
+ except subprocess.TimeoutExpired:
+ deps_proc = None
+ pass
+
+            # if successful, record the latest deps with their hashes, otherwise try again later
+ if deps_proc is None or deps_proc.returncode != 0:
+ printer(f"{Fore.RED}[5] - Deps Failed!: {cmd_entry.file}{Fore.RESET}")
+ printer(deps_proc.stderr)
+ return ResultType.RESUBMIT
+ else:
+ with open(depfile.name) as deps:
+ deps_str = deps.read()
+ deps_str = deps_str.replace('\\\n', '').strip()
+
+ hashes = recalc_hashes(shlex.split(deps_str)[1:])
+ if not IWYU_ANALYSIS_STATE.get(cmd_entry.file):
+ IWYU_ANALYSIS_STATE[cmd_entry.file] = asdict(cmd_entry)
+ IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes'] = hashes
+ IWYU_ANALYSIS_STATE[cmd_entry.file]['success'] = []
+
+    # if the dep command failed the context manager will throw an exception; we ignore just
+    # that case
+ except FileNotFoundError as exc:
+ traceback.print_exc()
+ if depfile.name in str(exc):
+ pass
+
+ return None
+
+
+def execute_iwyu(cmd_entry: CompileCommand, test_dir: str) -> Union[ResultType, bytes]:
+ """
+ The third step of IWYU analysis. Check mode will stop here.
+
+ Here we want to execute IWYU on our source. Note at this point in fix mode
+ we will be working out of an isolated test directory which has the
+ required header tree copied over. Check mode will just pass in the original
+ project root as the test_dir (the real source tree).
+ """
+
+ # assert we are working with a pure clang++ build
+ if not cmd_entry.command.startswith(f'{TOOLCHAIN_DIR}/clang++'):
+ printer("unexpected compiler:")
+ printer(cmd_entry.command)
+ return ResultType.FAILED
+
+ # swap out for our tool and add in extra options for IWYU
+ cmd = f'{TOOLCHAIN_DIR}/include-what-you-use' + cmd_entry.command[len(f'{TOOLCHAIN_DIR}/clang++'
+ ):]
+ cmd += ' ' + ' '.join(get_clang_includes())
+ cmd += ' ' + ' '.join(IWYU_OPTIONS)
+
+ # mimic the PATH we normally use in our build
+ env = os.environ.copy()
+ env['PATH'] += f':{TOOLCHAIN_DIR}'
+
+ debug_printer(f'[2] - Running IWYU: {cmd_entry.file}')
+ proc = subprocess.run(cmd, shell=True, env=env, capture_output=True, cwd=test_dir)
+
+    # IWYU seems to have some bugs around forward declares: in some cases, even though
+    # we have passed --no_fwd_decls, it still recommends forward declares, and sometimes they
+    # are wrong and cause compilation errors.
+ remove_fwd_declares = []
+ for line in proc.stderr.decode('utf-8').split('\n'):
+ line = line.strip()
+ if not line.endswith(':') and not line.startswith(
+ ('#include ', '-')) and ('class ' in line or 'struct ' in line):
+ continue
+ remove_fwd_declares.append(line)
+ iwyu_output = '\n'.join(remove_fwd_declares)
+
+ # IWYU has weird exit codes, where a >=2 is considered success:
+ # https://github.com/include-what-you-use/include-what-you-use/blob/clang_12/iwyu_globals.h#L27-L34
+ if command_line_args.check and proc.returncode != 2:
+ printer(f"{Fore.RED}[2] - IWYU Failed: {cmd_entry.file}{Fore.RESET}")
+ if proc.returncode < 2:
+ printer(f"exited with error: {proc.returncode}")
+ else:
+ printer(f"changes required: {proc.returncode - 2}")
+ printer(iwyu_output)
+ return failed_return()
+ elif proc.returncode < 2:
+ printer(f'{Fore.RED}[2] - IWYU Failed : {cmd_entry.file}{Fore.RESET}')
+ printer(cmd)
+ printer(str(proc.returncode))
+ printer(proc.stderr.decode('utf-8'))
+ copy_error_state(cmd_entry, test_dir)
+ return failed_return()
+
+ # save the output for debug or inspection later
+ with open(os.path.splitext(cmd_entry.output)[0] + '.iwyu', 'w') as iwyu_out:
+ iwyu_out.write(iwyu_output)
+
+ return iwyu_output.encode('utf-8')
+
+
+def apply_fixes(cmd_entry: CompileCommand, iwyu_output: bytes,
+ test_dir: str) -> Optional[ResultType]:
+ """
+ Step 4 in the IWYU process.
+
+ We need to run the fix_includes script to apply the output from the IWYU binary.
+ """
+ cmd = [f'{sys.executable}', f'{TOOLCHAIN_DIR}/fix_includes.py'] + IWYU_FIX_OPTIONS
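+    # fix_includes.py reads the IWYU report from stdin and rewrites the files it names in place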
+
+ debug_printer(f'[3] - Apply fixes : {cmd_entry.file}')
+ try:
+ subprocess.run(cmd, capture_output=True, input=iwyu_output, timeout=180, cwd=test_dir)
+ except subprocess.TimeoutExpired:
+ printer(f"{Fore.RED}[5] - Apply failed: {cmd_entry.file}{Fore.RESET}")
+ return ResultType.RESUBMIT
+
+ return None
+
+
+def test_compile(cmd_entry: CompileCommand, test_dir: str) -> Optional[ResultType]:
+ """
+ Step 5 in the IWYU analysis and the last step for fix mode.
+
+    We run the normal compile command in a test directory and make sure it is successful before
+    the changes are copied back into the real source tree for inclusion into other jobs.
+ """
+
+ try:
+ with tempfile.NamedTemporaryFile() as depfile:
+ debug_printer(f"[4] - Test compile: {cmd_entry.file}")
+
+ # we want to capture the header deps again because IWYU may have changed them
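+            # note -MMD (unlike -MD used earlier) omits system headers, which are outside the project root anyway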
+ cmd = cmd_entry.command
+ cmd += f' -MMD -MF {depfile.name}'
+ try:
+ p3 = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=300,
+ cwd=test_dir)
+ except (subprocess.TimeoutExpired, MemoryError):
+ p3 = None
+ pass
+
+            # our test compile has failed so we need to report and set up for debugging
+ if p3 is not None and p3.returncode != 0:
+ printer(f"{Fore.RED}[5] - IWYU Failed!: {cmd_entry.file}{Fore.RESET}")
+ printer(f"{cmd}")
+ printer(f"{p3.stderr}")
+ copy_error_state(cmd_entry, test_dir)
+ return failed_return()
+
+ else:
+ with open(depfile.name) as deps:
+ # calculate the hashes of the deps used to create
+ # this successful compile.
+ deps_str = deps.read()
+ deps_str = deps_str.replace('\\\n', '').strip()
+ hashes = recalc_hashes(shlex.split(deps_str)[1:], change_dir=test_dir)
+
+ if result := check_for_cycles(cmd_entry, hashes, test_dir):
+ return result
+
+ IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes'] = hashes
+ if 'FIX' not in IWYU_ANALYSIS_STATE[cmd_entry.file]['success']:
+ IWYU_ANALYSIS_STATE[cmd_entry.file]['success'].append('FIX')
+ printer(f"{Fore.GREEN}[5] - IWYU Success: {cmd_entry.file}{Fore.RESET}")
+ return ResultType.SUCCESS
+
+    # if we failed, the depfile may not have been generated, so if it is missing
+    # just ignore it
+ except FileNotFoundError as exc:
+ if depfile.name in str(exc):
+ pass
+
+ return None
+
+
+def initialize_deps(cmd_entry: CompileCommand) -> Tuple[ResultType, CompileCommand]:
+ """
+ When running in fix mode, we take some time to initialize the header deps.
+
+    This is mainly used to improve the overall time to complete full analysis. We want to process
+    the source files in order from fewest dependencies to most dependencies. The rationale
+    is that a file with a lot of dependencies should be done last so any changes in those dependencies
+    are automatically accounted for and the chance of needing to do rework is lessened. Also the
+    progress bar can be more accurate and not count skipped files.
+ """
+
+ # step 1
+ if result := need_to_process(cmd_entry, custom_printer=debug_printer):
+ return result, cmd_entry
+
+    # if we have deps recorded from a previous run, that should be a good enough indicator
+    # of how dependency heavy it is, and it's worth just taking that over
+    # needing to invoke the compiler.
+ try:
+ if len(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps']):
+ return ResultType.SUCCESS, cmd_entry
+
+ except KeyError:
+ pass
+
+ if result := calc_dep_headers(cmd_entry):
+ return result, cmd_entry
+
+ return ResultType.SUCCESS, cmd_entry
+
+
+def check_iwyu(cmd_entry: CompileCommand) -> ResultType:
+ """
+ One of the two thread functions the main thread pool executor will call.
+
+ Here we execute up to step 3 (steps at the top comment) and report success
+ if IWYU reports no required changes.
+ """
+
+ # step 1
+ if result := need_to_process(cmd_entry):
+ return result
+
+ # step 2
+ if result := calc_dep_headers(cmd_entry):
+ return result
+
+ # step 3
+ iwyu_out = execute_iwyu(cmd_entry, '.')
+ if isinstance(iwyu_out, ResultType):
+ return iwyu_out
+
+ # success!
+ printer(f"{Fore.GREEN}[2] - IWYU Success: {cmd_entry.file}{Fore.RESET}")
+ if "CHECK" not in IWYU_ANALYSIS_STATE[cmd_entry.file]['success']:
+ IWYU_ANALYSIS_STATE[cmd_entry.file]['success'].append('CHECK')
+ return ResultType.SUCCESS
+
+
+def fix_iwyu(cmd_entry: CompileCommand) -> ResultType:
+ """
+ One of the two thread functions the main thread pool executor will call.
+
+ Here we execute up to step 5 (steps at the top comment) and report success
+ if we are able to successfully compile the original command after IWYU
+ has made its changes.
+ """
+
+ # step 1
+ if result := need_to_process(cmd_entry):
+ return result
+
+ # step 2
+ if result := calc_dep_headers(cmd_entry):
+ return result
+
+ with tempfile.TemporaryDirectory() as test_dir:
+
+        # the changes will be done in an isolated test dir so as not to conflict with
+        # other concurrent processes.
+ test_source_files = setup_test_dir(cmd_entry, test_dir)
+
+        # a first round of pragmas to make sure IWYU doesn't fail or remove things we don't want
+ add_pragmas(test_source_files)
+
+ # step 3
+ iwyu_out = execute_iwyu(cmd_entry, test_dir)
+ if isinstance(iwyu_out, ResultType):
+ return iwyu_out
+
+ # now we can extract exactly what files IWYU operated on and copy only those back
+ changed_files = [
+ os.path.join(test_dir, file)
+ for file in re.findall(CHANGED_FILES_REGEX, iwyu_out.decode('utf-8'))
+ if in_project_root(file)
+ ]
+ test_source_files += [file for file in changed_files if file not in test_source_files]
+
+ # step 4
+ if result := apply_fixes(cmd_entry, iwyu_out, test_dir):
+ return result
+
+ # a final round of pragmas for the next time this is run through IWYU
+ add_pragmas(test_source_files)
+
+ # step 5
+ result = test_compile(cmd_entry, test_dir)
+ if result == ResultType.SUCCESS:
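+            # copy the changed files back into the real source tree; stripping the test_dir prefix gives the project-relative path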
+ for file in test_source_files:
+ if os.path.exists(file):
+ shutil.move(file, file[len(test_dir) + 1:])
+
+ return result
+
+
+def run_iwyu(cmd_entry: CompileCommand) -> Tuple[ResultType, CompileCommand]:
+ """Intermediate function which delegates the underlying mode to run."""
+
+ if command_line_args.check:
+ return check_iwyu(cmd_entry), cmd_entry
+ else:
+ return fix_iwyu(cmd_entry), cmd_entry
+
+
+def main() -> None:
+ """Main function."""
+ global IWYU_ANALYSIS_STATE, SHUTDOWN_FLAG # pylint: disable=global-statement
+ atexit.register(write_iwyu_data)
+
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=len(os.sched_getaffinity(0)) + 4) as executor:
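+        # slightly more workers than CPUs: each job spends most of its time blocked on compiler/IWYU subprocesses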
+
+        # on ctrl+c, try to shut down as fast as possible.
+ def sigint_handler(the_signal, frame):
+ executor.shutdown(wait=False, cancel_futures=True)
+ sys.exit(1)
+
+ signal.signal(signal.SIGINT, sigint_handler)
+
+ # load in any data from prior runs
+ if os.path.exists(command_line_args.iwyu_data):
+ with open(command_line_args.iwyu_data) as iwyu_data_file:
+ IWYU_ANALYSIS_STATE = json.load(iwyu_data_file)
+
+ # load in the compile commands
+ with open(command_line_args.compile_commands) as compdb_file:
+ compiledb = [CompileCommand(**json_data) for json_data in json.load(compdb_file)]
+
+ # assert the generated source code has been generated
+ for cmd_entry in compiledb:
+ if cmd_entry.file.endswith('_gen.cpp') and not os.path.exists(cmd_entry.file):
+ printer(f"{Fore.RED}[5] - Missing Gen!: {cmd_entry.file}{Fore.RESET}")
+ printer(
+ f"Error: missing generated file {cmd_entry.file}, make sure generated-sources are generated."
+ )
+ sys.exit(1)
+
+ total_cmds = len(compiledb)
+ start_index = int(total_cmds * command_line_args.start_ratio)
+ if start_index < 0:
+ start_index = 0
+ if start_index > total_cmds:
+ start_index = total_cmds
+
+ end_index = int(total_cmds * command_line_args.end_ratio)
+ if end_index < 0:
+ end_index = 0
+ if end_index > total_cmds:
+ end_index = total_cmds
+
+ if start_index == end_index:
+ print(f"Error: start_index and end_index are the same: {start_index}")
+ sys.exit(1)
+ if start_index > end_index:
+ print(
+ f"Error: start_index {start_index} can not be greater than end_index {end_index}"
+ )
+ sys.exit(1)
+
+ print(f"Analyzing compile commands from {start_index} to {end_index}.")
+ compiledb = compiledb[start_index:end_index]
+ if not command_line_args.check:
+ # We can optimize the order we process things by processing source files
+            # with the least number of dependencies first. This has a cost up front
+            # but will result in huge gains in the amount of re-processing avoided.
+ printer("Getting Initial Header Dependencies...")
+ cmd_entry_list = []
+ try:
+ with tqdm(total=len(compiledb), disable=None) as pbar:
+
+ # create and run the dependency check jobs
+ future_cmd = {
+                        executor.submit(initialize_deps, cmd_entry): cmd_entry
+ for cmd_entry in compiledb
+ }
+ for future in concurrent.futures.as_completed(future_cmd):
+ result, cmd_entry = future.result()
+ if result != ResultType.NOT_RUNNING:
+ cmd_entry_list.append(cmd_entry)
+ pbar.update(1)
+ except Exception:
+ SHUTDOWN_FLAG = True
+ traceback.print_exc()
+ executor.shutdown(wait=True, cancel_futures=True)
+ sys.exit(1)
+ else:
+ cmd_entry_list = compiledb
+
+ try:
+
+            # this loop will keep looping until a full run produces no new changes.
+ changes_left = True
+ while changes_left:
+ changes_left = False
+
+ with tqdm(total=len(cmd_entry_list), disable=None) as pbar:
+
+ # create and run the IWYU jobs
+ def dep_sorted(cmd_entry):
+ try:
+ return len(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'])
+ except KeyError:
+ return 0
+
+ future_cmd = {
+ executor.submit(run_iwyu, cmd_entry): cmd_entry
+ for cmd_entry in sorted(cmd_entry_list, key=dep_sorted)
+ }
+
+ # process the results
+ for future in concurrent.futures.as_completed(future_cmd):
+ result, cmd_entry = future.result()
+
+                        # any result which implies there could still be changes required sets the
+                        # flag so we run another loop
+ if result not in (ResultType.NO_CHANGE, ResultType.NOT_RUNNING):
+ changes_left = True
+
+ # if a file is considered done for this loop, update the status bar
+ if result in [
+ ResultType.SUCCESS, ResultType.NO_CHANGE, ResultType.NOT_RUNNING
+ ]:
+ pbar.update(1)
+                        # resubmit jobs which may have a better chance of running later
+ elif result == ResultType.RESUBMIT:
+ executor.submit(run_iwyu, cmd_entry)
+                        # handle a failure case; the exception quickly drops us out of this loop.
+ else:
+ SHUTDOWN_FLAG = True
+ tqdm.write(
+ f"{result.name}: Shutting down other threads, please be patient."
+ )
+ raise Exception(
+                                f'Shutdown due to {result.name} {cmd_entry.file}')
+
+ except Exception:
+ SHUTDOWN_FLAG = True
+ traceback.print_exc()
+ executor.shutdown(wait=True, cancel_futures=True)
+ sys.exit(1)
+ finally:
+ if CYCLE_FILES:
+ printer(f"{Fore.YELLOW} Cycles detected:")
+ for file in CYCLE_FILES:
+ printer(f' {file}')
+
+
+main()
diff --git a/buildscripts/iwyu/test/basic/a.h b/buildscripts/iwyu/test/basic/a.h
new file mode 100644
index 0000000000000..ad792ace34b47
--- /dev/null
+++ b/buildscripts/iwyu/test/basic/a.h
@@ -0,0 +1 @@
+#include "b.h"
diff --git a/buildscripts/iwyu/test/basic/b.cpp b/buildscripts/iwyu/test/basic/b.cpp
new file mode 100644
index 0000000000000..dcbc86277644a
--- /dev/null
+++ b/buildscripts/iwyu/test/basic/b.cpp
@@ -0,0 +1,5 @@
+#include "a.h"
+
+type_b return_b_function() {
+ return type_b();
+}
diff --git a/buildscripts/iwyu/test/basic/b.h b/buildscripts/iwyu/test/basic/b.h
new file mode 100644
index 0000000000000..422d7626e9077
--- /dev/null
+++ b/buildscripts/iwyu/test/basic/b.h
@@ -0,0 +1 @@
+class type_b {};
diff --git a/buildscripts/iwyu/test/basic/expected_results.py b/buildscripts/iwyu/test/basic/expected_results.py
new file mode 100644
index 0000000000000..98ed60ea4fb80
--- /dev/null
+++ b/buildscripts/iwyu/test/basic/expected_results.py
@@ -0,0 +1,17 @@
+import os
+import sys
+
+EXPECTED_B_CPP = """
+#include "b.h"
+
+type_b return_b_function() {
+ return type_b();
+}
+"""
+
+with open('b.cpp') as f:
+ content = f.read()
+ if content != EXPECTED_B_CPP:
+ print(f'Actual:\n"""{content}"""')
+ print(f'Expected:\n"""{EXPECTED_B_CPP}"""')
+ sys.exit(1)
diff --git a/buildscripts/iwyu/test/basic/test_config.yml b/buildscripts/iwyu/test/basic/test_config.yml
new file mode 100644
index 0000000000000..a5b906f5558b9
--- /dev/null
+++ b/buildscripts/iwyu/test/basic/test_config.yml
@@ -0,0 +1,25 @@
+# options passed to IWYU
+iwyu_options:
+ - '--max_line_length=100'
+ - '--no_fwd_decls'
+ - '--prefix_header_includes=add'
+ - '--transitive_includes_only'
+
+# options passed to the fix script
+fix_options:
+ - '--blank_lines'
+ - '--nocomments'
+ - '--noreorder'
+ - '--safe_headers'
+
+# filename regexes to swap to no_include pragmas in place
+# quotes and brackets are not included in the regex; quotes are always assumed
+# since this is targeting IWYU added headers
+no_includes:
+
+# prefixes (non regex) to skip
+skip_files:
+
+# regex file paths to add keep pragma to
+# include the quotes or angle brackets
+keep_includes:
diff --git a/buildscripts/iwyu/test/no_include/a.h b/buildscripts/iwyu/test/no_include/a.h
new file mode 100644
index 0000000000000..ad792ace34b47
--- /dev/null
+++ b/buildscripts/iwyu/test/no_include/a.h
@@ -0,0 +1 @@
+#include "b.h"
diff --git a/buildscripts/iwyu/test/no_include/b.cpp b/buildscripts/iwyu/test/no_include/b.cpp
new file mode 100644
index 0000000000000..dcbc86277644a
--- /dev/null
+++ b/buildscripts/iwyu/test/no_include/b.cpp
@@ -0,0 +1,5 @@
+#include "a.h"
+
+type_b return_b_function() {
+ return type_b();
+}
diff --git a/buildscripts/iwyu/test/no_include/b.h b/buildscripts/iwyu/test/no_include/b.h
new file mode 100644
index 0000000000000..422d7626e9077
--- /dev/null
+++ b/buildscripts/iwyu/test/no_include/b.h
@@ -0,0 +1 @@
+class type_b {};
diff --git a/buildscripts/iwyu/test/no_include/expected_results.py b/buildscripts/iwyu/test/no_include/expected_results.py
new file mode 100644
index 0000000000000..90bda7e15a48e
--- /dev/null
+++ b/buildscripts/iwyu/test/no_include/expected_results.py
@@ -0,0 +1,18 @@
+import os
+import sys
+
+EXPECTED_B_CPP = """// IWYU pragma: no_include "b.h"
+
+#include "a.h" // IWYU pragma: keep
+
+type_b return_b_function() {
+ return type_b();
+}
+"""
+
+with open('b.cpp') as f:
+ content = f.read()
+ if content != EXPECTED_B_CPP:
+ print(f'Actual:\n"""{content}"""')
+ print(f'Expected:\n"""{EXPECTED_B_CPP}"""')
+ sys.exit(1)
diff --git a/buildscripts/iwyu/test/no_include/test_config.yml b/buildscripts/iwyu/test/no_include/test_config.yml
new file mode 100644
index 0000000000000..e441f5bac352b
--- /dev/null
+++ b/buildscripts/iwyu/test/no_include/test_config.yml
@@ -0,0 +1,27 @@
+# options passed to IWYU
+iwyu_options:
+ - '--max_line_length=100'
+ - '--no_fwd_decls'
+ - '--prefix_header_includes=add'
+ - '--transitive_includes_only'
+
+# options passed to the fix script
+fix_options:
+ - '--blank_lines'
+ - '--nocomments'
+ - '--noreorder'
+ - '--safe_headers'
+
+# filename regexes to swap to no_include pragmas in place
+# quotes and brackets are not included in the regex; quotes are always assumed
+# since this is targeting IWYU added headers
+no_includes:
+ - 'b.h'
+
+# prefixes (non regex) to skip
+skip_files:
+
+# regex file paths to add keep pragma to
+# include the quotes or angle brackets
+keep_includes:
+- '"a.h"'
diff --git a/buildscripts/iwyu/test/run_tests.py b/buildscripts/iwyu/test/run_tests.py
new file mode 100644
index 0000000000000..d0e32f00a8dce
--- /dev/null
+++ b/buildscripts/iwyu/test/run_tests.py
@@ -0,0 +1,97 @@
+import pathlib
+import yaml
+import json
+import shutil
+import os
+import glob
+import subprocess
+import sys
+import argparse
+import concurrent.futures
+
+parser = argparse.ArgumentParser(description='Run tests for the IWYU analysis script.')
+
+parser.add_argument('--mongo-toolchain-bin-dir', type=str,
+ help='Which toolchain bin directory to use for this analysis.',
+ default='/opt/mongodbtoolchain/v4/bin')
+
+args = parser.parse_args()
+
+if pathlib.Path(os.getcwd()) != pathlib.Path(__file__).parent.resolve():
+ print(
+ f"iwyu test script must run in the tests directory, changing dirs to {pathlib.Path(__file__).parent.resolve()}"
+ )
+ os.chdir(pathlib.Path(__file__).parent.resolve())
+
+analysis_script = pathlib.Path(__file__).parent.parent / 'run_iwyu_analysis.py'
+
+
+def run_test(entry):
+ print(f"Running test {pathlib.Path(entry)}...")
+ test_dir = pathlib.Path(entry) / 'test_run'
+ if os.path.exists(test_dir):
+ shutil.rmtree(test_dir)
+
+ shutil.copytree(pathlib.Path(entry), test_dir)
+
+ source_files = glob.glob('**/*.cpp', root_dir=test_dir, recursive=True)
+ compile_commands = []
+
+ for source_file in source_files:
+ output = os.path.splitext(source_file)[0] + '.o'
+ compile_commands.append({
+ 'file': source_file,
+ 'command': f"{args.mongo_toolchain_bin_dir}/clang++ -o {output} -c {source_file}",
+ "directory": os.path.abspath(test_dir),
+ "output": output,
+ })
+
+ with open(test_dir / 'compile_commands.json', 'w') as compdb:
+ json.dump(compile_commands, compdb)
+
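+    # the analysis script copies etc/iwyu_mapping.imp relative to the project root, so create a placeholder mapping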
+ os.makedirs(test_dir / 'etc', exist_ok=True)
+ with open(test_dir / 'etc' / 'iwyu_mapping.imp', 'w') as mapping:
+ mapping.write(
+ '[{include: ["\\"placeholder.h\\"", "private", "\\"placeholder2.h\\"", "public"]}]')
+
+ iwyu_run = subprocess.run(
+ [sys.executable, analysis_script, '--verbose', '--config-file=test_config.yml'], text=True,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=test_dir)
+
+ results_run = subprocess.run(
+ [sys.executable, pathlib.Path(entry) / 'expected_results.py'], stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, text=True, cwd=test_dir)
+
+ msg = '\n'.join([iwyu_run.stdout, results_run.stdout, f"FAILED!: {pathlib.Path(entry)}"])
+ msg = '\n'.join([f"[{pathlib.Path(entry).name}] {line}" for line in msg.split('\n')])
+
+ if results_run.returncode != 0:
+ return results_run.returncode, msg, pathlib.Path(entry).name
+ else:
+ return results_run.returncode, f"[{pathlib.Path(entry).name}] PASSED!: {pathlib.Path(entry)}", pathlib.Path(
+ entry).name
+
+
+failed_tests = []
+with concurrent.futures.ThreadPoolExecutor(
+ max_workers=len(os.sched_getaffinity(0)) + 4) as executor:
+
+ # create and run the IWYU jobs
+ future_cmd = {
+ executor.submit(run_test, entry): entry
+ for entry in pathlib.Path(__file__).parent.glob('*') if os.path.isdir(entry)
+ }
+
+ # process the results
+ for future in concurrent.futures.as_completed(future_cmd):
+ result, message, test_name = future.result()
+ if result != 0:
+ failed_tests += [test_name]
+ print(message)
+
+print("\n***Tests complete.***")
+if failed_tests:
+ print("The following tests failed:")
+ for test in failed_tests:
+ print(' - ' + test)
+ print("Please review the logs above for more information.")
diff --git a/buildscripts/large_file_check.py b/buildscripts/large_file_check.py
new file mode 100755
index 0000000000000..7c5388f1f34f8
--- /dev/null
+++ b/buildscripts/large_file_check.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+"""Check files in git diff to ensure they are within a given size limit."""
+
+# pylint: disable=wrong-import-position
+
+import argparse
+import fnmatch
+import logging
+import os
+import pathlib
+import sys
+import textwrap
+
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import structlog
+
+from git import Repo
+
+mongo_dir = os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(mongo_dir)
+
+from buildscripts.linter import git
+from buildscripts.patch_builds.change_data import (RevisionMap, find_changed_files_in_repos,
+ generate_revision_map)
+
+
+# Console renderer for structured logging
+def renderer(_logger: logging.Logger, _name: str, eventdict: Dict[Any, Any]) -> str:
+ if 'files' in eventdict:
+ return "{event}: {files}".format(**eventdict)
+ if 'repo' in eventdict:
+ return "{event}: {repo}".format(**eventdict)
+ if 'file' in eventdict:
+ if 'bytes' in eventdict:
+ return "{event}: {file} {bytes} bytes".format(**eventdict)
+ return "{event}: {file}".format(**eventdict)
+ return "{event}".format(**eventdict)
+
+
+# Configure the logger so it doesn't spam output on huge diffs
+structlog.configure(
+ logger_factory=structlog.stdlib.LoggerFactory(),
+ wrapper_class=structlog.stdlib.BoundLogger,
+ cache_logger_on_first_use=True,
+ processors=[
+ structlog.stdlib.filter_by_level,
+ renderer,
+ ],
+)
+
+LOGGER = structlog.get_logger(__name__)
+MONGO_REVISION_ENV_VAR = "REVISION"
+ENTERPRISE_REVISION_ENV_VAR = "ENTERPRISE_REV"
+
+
+def _get_repos_and_revisions() -> Tuple[List[Repo], RevisionMap]:
+ """Get the repo object and a map of revisions to compare against."""
+ modules = git.get_module_paths()
+ repos = [Repo(path) for path in modules]
+ revision_map = generate_revision_map(
+ repos, {
+ "mongo": os.environ.get(MONGO_REVISION_ENV_VAR),
+ "enterprise": os.environ.get(ENTERPRISE_REVISION_ENV_VAR)
+ })
+ return repos, revision_map
+
+
+def git_changed_files(excludes: List[pathlib.Path]) -> List[pathlib.Path]:
+ """
+ Get the files that have changes since the last git commit.
+
+ :param excludes: A list of files which should be excluded from changed file checks.
+ :return: List of changed files.
+ """
+ repos, revision_map = _get_repos_and_revisions()
+ LOGGER.debug("revisions", revision=revision_map)
+
+ def _filter_fn(file_path: pathlib.Path) -> bool:
+ if not file_path.exists():
+ return False
+ for exclude in excludes:
+ if fnmatch.fnmatch(file_path, exclude):
+ return False
+ return True
+
+ files = [
+ filename
+ for filename in list(map(pathlib.Path, find_changed_files_in_repos(repos, revision_map)))
+ if _filter_fn(filename)
+ ]
+
+ LOGGER.debug("Found files to check", files=list(map(str, files)))
+ return files
+
+
+def diff_file_sizes(size_limit: int, excludes: Optional[List[str]] = None) -> List[pathlib.Path]:
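+    """Return the changed files whose size exceeds size_limit bytes, skipping any paths matching excludes."""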
+ if excludes is None:
+ excludes = []
+
+ large_files: list[pathlib.Path] = []
+
+ for file_path in git_changed_files(excludes):
+ LOGGER.debug("Checking file size", file=str(file_path))
+ file_size = file_path.stat().st_size
+ if file_size > size_limit:
+ LOGGER.error("File too large", file=str(file_path), bytes=file_size)
+ large_files.append(file_path)
+
+ return large_files
+
+
+def main(*args: str) -> int:
+ """Execute Main entry point."""
+
+ parser = argparse.ArgumentParser(
+ description='Git commit large file checker.', epilog=textwrap.dedent('''\
+ NOTE: The --exclude argument is an exact match but can accept glob patterns. If * is used,
+ it matches *all* characters, including path separators.
+ '''))
+ parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
+ parser.add_argument("--exclude", help="Paths to exclude from check", nargs="+",
+ type=pathlib.Path, required=False)
+    parser.add_argument("--size-mb", help="File size limit (MiB)", type=int, default=10)
+ parsed_args = parser.parse_args(args[1:])
+
+ if parsed_args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ structlog.stdlib.filter_by_level(LOGGER, 'debug', {})
+ else:
+ logging.basicConfig(level=logging.INFO)
+ structlog.stdlib.filter_by_level(LOGGER, 'info', {})
+
+ large_files = diff_file_sizes(parsed_args.size_mb * 1024 * 1024, parsed_args.exclude)
+ if len(large_files) == 0:
+ LOGGER.info("All files passed size check")
+ return 0
+
+ LOGGER.error("Some files failed size check", files=list(map(str, large_files)))
+ return 1
+
+
+if __name__ == '__main__':
+ sys.exit(main(*sys.argv))
diff --git a/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt b/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt
index 4e170dfd6e1d4..5e61a0b26fafb 100644
--- a/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt
+++ b/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt
@@ -1,3 +1,3 @@
3 removed shim node property
2 flipped edge direction in graph file data
-1 initial schema
\ No newline at end of file
+1 initial schema
diff --git a/buildscripts/libdeps/find_symbols.c b/buildscripts/libdeps/find_symbols.c
index e6eabea1598aa..f28d165a1d8ab 100644
--- a/buildscripts/libdeps/find_symbols.c
+++ b/buildscripts/libdeps/find_symbols.c
@@ -366,4 +366,4 @@ int main(int argc, char** argv) {
clean_up();
exit(0);
-}
\ No newline at end of file
+}
diff --git a/buildscripts/libdeps/graph_visualizer.py b/buildscripts/libdeps/graph_visualizer.py
index 357e42c9d2048..afd4cbd575adc 100644
--- a/buildscripts/libdeps/graph_visualizer.py
+++ b/buildscripts/libdeps/graph_visualizer.py
@@ -43,6 +43,8 @@
import textwrap
import flask
+from werkzeug.serving import is_running_from_reloader
+
from graph_visualizer_web_stack.flask.flask_backend import BackendServer
@@ -98,10 +100,10 @@ def check_node(node_check, cwd):
"""Check node version and install npm packages."""
status, output = subprocess.getstatusoutput(node_check)
- if status != 0 or not output.split('\n')[-1].startswith('v12'):
+ if status != 0 or not output.split('\n')[-1].startswith('v14'):
print(
textwrap.dedent(f"""\
- Failed to get node version 12 from 'node -v':
+ Failed to get node version 14 from 'node -v':
output: '{output}'
Perhaps run 'source {cwd}/setup_node_env.sh install'"""))
exit(1)
@@ -179,24 +181,26 @@ def main():
npm_start = ['npm', 'start']
npm_build = ['npm', 'run', 'build']
- check_node(node_check, cwd)
+ if not is_running_from_reloader():
+ check_node(node_check, cwd)
- frontend_thread = None
- if args.launch in ['frontend', 'both']:
- if args.debug:
- npm_command = npm_start
- else:
- npm_command = npm_build
+ frontend_thread = None
+ if args.launch in ['frontend', 'both']:
+ if args.debug:
+ npm_command = npm_start
+ else:
+ npm_command = npm_build
- frontend_thread = threading.Thread(target=start_frontend_thread,
- args=(web_service_info, npm_command, args.debug))
- frontend_thread.start()
+ frontend_thread = threading.Thread(target=start_frontend_thread,
+ args=(web_service_info, npm_command, args.debug))
+ frontend_thread.start()
if args.launch in ['backend', 'both']:
start_backend(web_service_info, args.debug)
- if frontend_thread:
- frontend_thread.join()
+ if not is_running_from_reloader():
+ if frontend_thread:
+ frontend_thread.join()
if __name__ == "__main__":
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/package.json b/buildscripts/libdeps/graph_visualizer_web_stack/package.json
index ace34ed2d8d22..c375f0f2ceeff 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/package.json
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/package.json
@@ -3,7 +3,7 @@
"version": "4.0.0",
"private": true,
"engines": {
- "node": ">=12.0.0"
+ "node": ">=14.0.0"
},
"engineStrict": true,
"scripts": {
@@ -13,37 +13,36 @@
"test": "react-scripts test",
"eject": "react-scripts eject"
},
- "//": "TODO: adding bezier and force-graph and locking versions until https://github.com/vasturiano/force-graph/issues/182 is resolved",
"dependencies": {
- "@emotion/react": "^11.1.4",
- "@emotion/styled": "^11.0.0",
- "@material-ui/core": "5.0.0-alpha.22",
- "@material-ui/icons": "5.0.0-alpha.22",
- "@material-ui/lab": "5.0.0-alpha.22",
- "bezier-js": "4.0.3",
- "canvas": "^2.5.0",
- "date-fns": "^2.16.1",
- "dayjs": "^1.9.7",
- "force-graph": "1.40.0",
+ "@emotion/react": "^11.11.0",
+ "@emotion/styled": "^11.11.0",
+ "@material-ui/core": "^5.0.0-alpha.22",
+ "@material-ui/icons": "^5.0.0-alpha.22",
+ "@material-ui/lab": "^5.0.0-alpha.22",
+ "bezier-js": "6.1.3",
+ "canvas": "^2.11.2",
+ "date-fns": "^2.30.0",
+ "dayjs": "^1.11.7",
+ "force-graph": "^1.43.1",
"http-proxy-middleware": "^2.0.6",
- "http-server": "^0.12.3",
- "luxon": "^1.25.0",
- "moment": "^2.29.1",
- "p-limit": "^3.0.2",
- "react": "^16.8",
- "react-dom": "^16.0.0",
- "react-force-graph-2d": "1.18.1",
- "react-force-graph-3d": "1.18.8",
- "react-indiana-drag-scroll": "^1.8.0",
- "react-redux": "^7.2.2",
- "react-resize-aware": "^3.1.0",
- "react-resize-detector": "^6.6.5",
- "react-scripts": "^4.0.3",
+ "http-server": "^14.1.1",
+ "luxon": "^3.3.0",
+ "moment": "^2.29.4",
+ "p-limit": "^4.0.0",
+ "react": "^18.2",
+ "react-dom": "^18.2.0",
+ "react-force-graph-2d": "1.25.0",
+ "react-force-graph-3d": "1.23.0",
+ "react-indiana-drag-scroll": "^2.2.0",
+ "react-redux": "^8.0.5",
+ "react-resize-aware": "3.1.1",
+ "react-resize-detector": "^9.1.0",
+ "react-scripts": "^5.0.1",
"react-split-pane": "^0.1.92",
- "react-virtualized": "^9.22.2",
- "react-window": "^1.8.6",
- "redux": "^4.0.5",
- "typescript": "^3.9.7"
+ "react-virtualized": "^9.22.5",
+ "react-window": "^1.8.9",
+ "redux": "^4.2.1",
+ "typescript": "^5.0.4"
},
"browserslist": {
"production": [
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh b/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh
index 22c2d2295cfba..e680c85499b65 100755
--- a/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh
@@ -19,7 +19,7 @@ else
\. "$NVM_DIR/nvm.sh"
fi
-nvm install 12
+nvm install 14
if [ "$1" = "install" ]
then
@@ -46,4 +46,4 @@ then
. "$NVM_DIR/nvm.sh"
fi
-popd > /dev/null
\ No newline at end of file
+popd > /dev/null
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js
index 00220d4078a00..2fbc51e720d0b 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js
@@ -15,6 +15,8 @@ import { setNodeInfos } from "./redux/nodeInfo";
import { setLinks } from "./redux/links";
import { setLinksTrans } from "./redux/linksTrans";
+const {REACT_APP_API_URL} = process.env;
+
function componentToHex(c) {
var hex = c.toString(16);
return hex.length == 1 ? "0" + hex : hex;
@@ -113,7 +115,7 @@ const DataGrid = ({
"selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node),
"transitive_edges": showTransitive
};
- fetch('/api/graphs/' + gitHash + '/d3', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -126,7 +128,7 @@ const DataGrid = ({
setLinks(data.graphData.links);
setLinksTrans(data.graphData.links_trans);
});
- fetch('/api/graphs/' + gitHash + '/nodes/details', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js
index b0724498b71cd..2e374807997e6 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js
@@ -20,6 +20,8 @@ import { setLinksTrans } from "./redux/linksTrans";
import { setShowTransitive } from "./redux/showTransitive";
import LoadingBar from "./LoadingBar";
+const {REACT_APP_API_URL} = process.env;
+
const handleFindNode = (node_value, graphData, activeComponent, forceRef) => {
var targetNode = null;
if (graphData) {
@@ -131,7 +133,7 @@ const DrawGraph = ({
"selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node),
"transitive_edges": showTransitive
};
- fetch('/api/graphs/' + gitHash + '/d3', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -144,7 +146,7 @@ const DrawGraph = ({
setLinks(data.graphData.links);
setLinksTrans(data.graphData.links_trans);
});
- fetch('/api/graphs/' + gitHash + '/nodes/details', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js
index 28226bc6206c0..69830ce83b178 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js
@@ -258,4 +258,4 @@ const EdgeList = ({ selectedGraph, links, setLinks, linksTrans, loading, setFind
);
};
-export default connect(getEdges, { setGraphData, setFindNode, setLinks, setSelectedPath })(withStyles(styles)(EdgeList));
\ No newline at end of file
+export default connect(getEdges, { setGraphData, setFindNode, setLinks, setSelectedPath })(withStyles(styles)(EdgeList));
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js
index 5863d2bea9df3..3552d26cd2c76 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js
@@ -11,6 +11,8 @@ import { connect } from "react-redux";
import { getCounts } from "./redux/store";
import { setCounts } from "./redux/counts";
+const {REACT_APP_API_URL} = process.env;
+
const columns = [
{ id: "ID", field: "type", headerName: "Count Type", width: 50 },
{ field: "value", headerName: "Value", width: 50 },
@@ -26,7 +28,7 @@ const GraphInfo = ({ selectedGraph, counts, datawidth, setCounts }) => {
React.useEffect(() => {
let gitHash = selectedGraph;
if (gitHash) {
- fetch('/api/graphs/' + gitHash + '/analysis')
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/analysis')
.then(response => response.json())
.then(data => {
setCounts(data.results);
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js
index ead5fd4245d2d..ed738f62f169f 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js
@@ -22,6 +22,8 @@ import { setLinksTrans } from "./redux/linksTrans";
import OverflowTooltip from "./OverflowTooltip";
+const {REACT_APP_API_URL} = process.env;
+
const rowHeight = 25;
const Accordion = withStyles({
@@ -131,7 +133,7 @@ const GraphPaths = ({
"fromNode": fromNode,
"toNode": toNode
};
- fetch('/api/graphs/' + gitHash + '/paths', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/paths', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -146,7 +148,7 @@ const GraphPaths = ({
"extra_nodes": data.extraNodes,
"transitive_edges": showTransitive
};
- fetch('/api/graphs/' + gitHash + '/d3', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js
index bdcc5755f43c6..17c73a3cc28f4 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js
@@ -17,6 +17,8 @@ import { setLoading } from "./redux/loading";
import { setListSearchTerm } from "./redux/listSearchTerm";
import { Button, Autocomplete, Grid } from "@material-ui/core";
+const {REACT_APP_API_URL} = process.env;
+
const columns = [
{ dataKey: "check", label: "Selected", width: 70 },
{ dataKey: "name", label: "Name", width: 200 },
@@ -29,7 +31,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s
React.useEffect(() => {
let gitHash = selectedGraph;
if (gitHash) {
- fetch('/api/graphs/' + gitHash + '/nodes')
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes')
.then(response => response.json())
.then(data => {
setNodes(data.nodes.map((node, index) => {
@@ -55,7 +57,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s
"selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node),
"transitive_edges": showTransitive
};
- fetch('/api/graphs/' + gitHash + '/d3', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -68,7 +70,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s
setLinks(data.graphData.links);
setLinksTrans(data.graphData.links_trans);
});
- fetch('/api/graphs/' + gitHash + '/nodes/details', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -174,4 +176,4 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s
);
};
-export default connect(getNodes, { setFindNode, setNodes, setNodeInfos, setLinks, setLinksTrans, setLoading, setListSearchTerm, updateCheckbox, updateSelected, setGraphData })(NodeList);
\ No newline at end of file
+export default connect(getNodes, { setFindNode, setNodes, setNodeInfos, setLinks, setLinksTrans, setLoading, setListSearchTerm, updateCheckbox, updateSelected, setGraphData })(NodeList);
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js
index c09ec44f319ac..cb76ba4073934 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js
@@ -14,6 +14,8 @@ import { getGraphData } from "./redux/store";
import { setLinks } from "./redux/links";
import { setLinksTrans } from "./redux/linksTrans";
+const {REACT_APP_API_URL} = process.env;
+
const OverflowTip = (props) => {
const textElementRef = useRef(null);
const [hoverStatus, setHover] = useState(false);
@@ -33,7 +35,7 @@ const OverflowTip = (props) => {
"selected_nodes": props.nodes.filter(node => node.selected == true).map(node => node.node),
"transitive_edges": props.showTransitive
};
- fetch('/api/graphs/' + gitHash + '/d3', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -46,7 +48,7 @@ const OverflowTip = (props) => {
props.setLinks(data.graphData.links);
props.setLinksTrans(data.graphData.links_trans);
});
- fetch('/api/graphs/' + gitHash + '/nodes/details', {
+ fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js
index 2cf4e2644c246..42533ed320204 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js
@@ -1,5 +1,5 @@
import React from "react";
-import ReactDOM from "react-dom";
+import ReactDOM from "react-dom/client";
import { Provider } from "react-redux";
import CssBaseline from "@material-ui/core/CssBaseline";
import { ThemeProvider } from "@material-ui/core/styles";
@@ -8,14 +8,12 @@ import theme from "./theme";
import store from "./redux/store";
import App from "./App";
-
-ReactDOM.render(
+const root = ReactDOM.createRoot(document.getElementById("root"));
+root.render(
- ,
-
- document.querySelector("#root")
+
);
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js
index df288f4af472c..2d9594511254c 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js
@@ -13,4 +13,4 @@ export const listSearchTerm = (state = initialState, action) => {
export const setListSearchTerm = (listSearchTerm) => ({
type: "setListSearchTerm",
payload: listSearchTerm,
-});
\ No newline at end of file
+});
diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js
index f9c1a14588ff7..31f23ca4341f7 100644
--- a/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js
+++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js
@@ -13,4 +13,4 @@ module.exports = function(app) {
secure: false,
})
);
-};
\ No newline at end of file
+};
diff --git a/buildscripts/linter/filediff.py b/buildscripts/linter/filediff.py
index 7dbad6d9d5863..23d4a988b2308 100644
--- a/buildscripts/linter/filediff.py
+++ b/buildscripts/linter/filediff.py
@@ -34,7 +34,7 @@ def _get_repos_and_revisions() -> Tuple[List[Repo], RevisionMap]:
return repos, revision_map
-def _filter_file(filename: str, is_interesting_file: Callable) -> bool:
+def _filter_file(filename: str, is_interesting_file: Callable[[str], bool]) -> bool:
"""
Determine if file should be included based on existence and passed in method.
@@ -45,7 +45,7 @@ def _filter_file(filename: str, is_interesting_file: Callable) -> bool:
return os.path.exists(filename) and is_interesting_file(filename)
-def gather_changed_files_for_lint(is_interesting_file: Callable) -> List[str]:
+def gather_changed_files_for_lint(is_interesting_file: Callable[[str], bool]) -> List[str]:
"""
Get the files that have changes since the last git commit.
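The tightened `Callable[[str], bool]` annotations above let a type checker verify that the predicate handed to these helpers takes a filename and returns a bool, instead of accepting any callable. A small, hedged sketch of the pattern (the function and predicate below are illustrative, not part of the patch):

```python
from typing import Callable, List


def keep_matching(names: List[str], is_interesting: Callable[[str], bool]) -> List[str]:
    """Return only the names accepted by the predicate."""
    return [name for name in names if is_interesting(name)]


# With the parameterized Callable, a checker such as mypy flags predicates with the
# wrong signature, e.g. a lambda taking two arguments or returning a string.
print(keep_matching(["a.cpp", "b.py"], lambda name: name.endswith(".cpp")))  # ['a.cpp']
```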
diff --git a/buildscripts/linter/mongolint.py b/buildscripts/linter/mongolint.py
new file mode 100644
index 0000000000000..c2edc606eaecd
--- /dev/null
+++ b/buildscripts/linter/mongolint.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python3
+"""Simple C++ Linter."""
+
+import argparse
+import bisect
+import io
+import logging
+import re
+import sys
+
+_RE_LINT = re.compile("//.*NOLINT")
+_RE_COMMENT_STRIP = re.compile("//.*")
+_RE_GENERIC_FCV_COMMENT = re.compile(r'\(Generic FCV reference\):')
+GENERIC_FCV = [
+ r'::kLatest',
+ r'::kLastContinuous',
+ r'::kLastLTS',
+ r'::kUpgradingFromLastLTSToLatest',
+ r'::kUpgradingFromLastContinuousToLatest',
+ r'::kDowngradingFromLatestToLastLTS',
+ r'::kDowngradingFromLatestToLastContinuous',
+ r'\.isUpgradingOrDowngrading',
+ r'::kDowngradingFromLatestToLastContinuous',
+ r'::kUpgradingFromLastLTSToLastContinuous',
+]
+_RE_GENERIC_FCV_REF = re.compile(r'(' + '|'.join(GENERIC_FCV) + r')\b')
+_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF = re.compile(r'isEnabledAndIgnoreFCVUnsafe\(\)')
+_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT = re.compile(r'\(Ignore FCV check\)')
+_RE_HEADER = re.compile(r'\.(h|hpp)$')
+
+
+class Linter:
+ """Simple C++ Linter."""
+
+ _license_header = '''\
+/**
+ * Copyright (C) {year}-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */'''.splitlines()
+
+ def __init__(self, file_name, raw_lines):
+ """Create new linter."""
+ self.file_name = file_name
+ self.raw_lines = raw_lines
+ self.clean_lines = []
+ self.nolint_suppression = []
+ self.generic_fcv_comments = []
+ self.feature_flag_ignore_fcv_check_comments = []
+ self._error_count = 0
+
+ def lint(self):
+ """Run linter, returning error count."""
+ # steps:
+ # - Check for header
+ # - Check for NOLINT and Strip multi line comments
+ # - Run file-level checks
+ # - Run per-line checks
+
+ start_line = self._check_for_server_side_public_license()
+
+ self._check_newlines()
+ self._check_and_strip_comments()
+
+ # Line-level checks
+ for linenum in range(start_line, len(self.clean_lines)):
+ if not self.clean_lines[linenum]:
+ continue
+
+ # Relax the rule of commenting generic FCV references for files directly related to FCV
+ # implementations.
+ if not "feature_compatibility_version" in self.file_name:
+ self._check_for_generic_fcv(linenum)
+
+ # Don't check feature_flag.h/cpp where the function is defined and test files.
+ if not "feature_flag" in self.file_name and not "test" in self.file_name:
+ self._check_for_feature_flag_ignore_fcv(linenum)
+
+ return self._error_count
+
+ def _check_newlines(self):
+ """Check that each source file ends with a newline character."""
+ if self.raw_lines and self.raw_lines[-1][-1:] != '\n':
+ self._error(
+ len(self.raw_lines), 'mongo/final_newline',
+ 'Files must end with a newline character.')
+
+ def _check_and_strip_comments(self):
+ in_multi_line_comment = False
+
+ for linenum in range(len(self.raw_lines)):
+ clean_line = self.raw_lines[linenum]
+
+ # Users can write NOLINT different ways
+ # // NOLINT
+ # // Some explanation NOLINT
+ # so we need a regular expression
+ if _RE_LINT.search(clean_line):
+ self.nolint_suppression.append(linenum)
+
+ if _RE_GENERIC_FCV_COMMENT.search(clean_line):
+ self.generic_fcv_comments.append(linenum)
+
+ if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT.search(clean_line):
+ self.feature_flag_ignore_fcv_check_comments.append(linenum)
+
+ if not in_multi_line_comment:
+ if "/*" in clean_line and not "*/" in clean_line:
+ in_multi_line_comment = True
+ clean_line = ""
+
+ # Trim comments - approximately
+ # Note, this does not understand if // is in a string
+ # i.e. it will think URLs are also comments but this should be good enough to find
+ # violators of the coding convention
+ if "//" in clean_line:
+ clean_line = _RE_COMMENT_STRIP.sub("", clean_line)
+ else:
+ if "*/" in clean_line:
+ in_multi_line_comment = False
+
+ clean_line = ""
+
+ self.clean_lines.append(clean_line)
+
+ def _license_error(self, linenum, msg, category='legal/license'):
+ style_url = 'https://github.com/mongodb/mongo/wiki/Server-Code-Style'
+ self._error(linenum, category, '{} See {}'.format(msg, style_url))
+ return (False, linenum)
+
+ def _check_for_server_side_public_license(self):
+ """Return the number of the line at which the check ended."""
+ src_iter = (x.rstrip() for x in self.raw_lines)
+ linenum = 0
+ for linenum, lic_line in enumerate(self._license_header):
+ src_line = next(src_iter, None)
+ if src_line is None:
+ self._license_error(linenum, 'Missing or incomplete license header.')
+ return linenum
+ lic_re = re.escape(lic_line).replace(r'\{year\}', r'\d{4}')
+ if not re.fullmatch(lic_re, src_line):
+ self._license_error(
+ linenum, 'Incorrect license header.\n'
+ ' Expected: "{}"\n'
+ ' Received: "{}"\n'.format(lic_line, src_line))
+ return linenum
+
+ # Warn if SSPL appears in Enterprise code, which has a different license.
+ expect_sspl_license = "enterprise" not in self.file_name
+ if not expect_sspl_license:
+ self._license_error(linenum,
+ 'Incorrect license header found. Expected Enterprise license.',
+ category='legal/enterprise_license')
+ return linenum
+ return linenum
+
+ def _check_for_generic_fcv(self, linenum):
+ line = self.clean_lines[linenum]
+ if _RE_GENERIC_FCV_REF.search(line):
+ # Find the first generic FCV comment preceding the current line.
+ i = bisect.bisect_right(self.generic_fcv_comments, linenum)
+ if not i or self.generic_fcv_comments[i - 1] < (linenum - 10):
+ self._error(
+ linenum, 'mongodb/fcv',
+ 'Please add a comment containing "(Generic FCV reference):" within 10 lines ' +
+ 'before the generic FCV reference.')
+
+ def _check_for_feature_flag_ignore_fcv(self, linenum):
+ line = self.clean_lines[linenum]
+ if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF.search(line):
+ # Find the first ignore FCV check comment preceding the current line.
+ i = bisect.bisect_right(self.feature_flag_ignore_fcv_check_comments, linenum)
+ if not i or self.feature_flag_ignore_fcv_check_comments[i - 1] < (linenum - 10):
+ self._error(
+ linenum, 'mongodb/fcv',
+ 'Please add a comment containing "(Ignore FCV check)" within 10 lines ' +
+ 'before the isEnabledAndIgnoreFCVUnsafe() function call explaining why ' +
+ 'the FCV check is ignored.')
+
+ def _error(self, linenum, category, message):
+ if linenum in self.nolint_suppression:
+ return
+
+ norm_file_name = self.file_name.replace('\\', '/')
+
+ # Custom clang-tidy check tests purposefully produce errors for
+ # tests to find. They should be ignored.
+ if "mongo_tidy_checks/tests/" in norm_file_name:
+ return
+
+ if category == "legal/license":
+ # Enterprise module does not have the SSPL license
+ if "enterprise" in self.file_name:
+ return
+
+ # The following files are in the src/mongo/ directory but technically belong
+ # in src/third_party/ because their copyright does not belong to MongoDB.
+ files_to_ignore = set([
+ 'src/mongo/scripting/mozjs/PosixNSPR.cpp',
+ 'src/mongo/shell/linenoise.cpp',
+ 'src/mongo/shell/linenoise.h',
+ 'src/mongo/shell/mk_wcwidth.cpp',
+ 'src/mongo/shell/mk_wcwidth.h',
+ 'src/mongo/util/md5.cpp',
+ 'src/mongo/util/md5.h',
+ 'src/mongo/util/md5main.cpp',
+ 'src/mongo/util/net/ssl_stream.cpp',
+ 'src/mongo/util/scopeguard.h',
+ ])
+
+ for file_to_ignore in files_to_ignore:
+ if file_to_ignore in norm_file_name:
+ return
+
+ # We count internally from 0 but users count from 1 for line numbers
+ print("Error: %s:%d - %s - %s" % (self.file_name, linenum + 1, category, message))
+ self._error_count += 1
+
+
+def lint_file(file_name):
+ """Lint file and print errors to console."""
+ with io.open(file_name, encoding='utf-8') as file_stream:
+ raw_lines = file_stream.readlines()
+
+ linter = Linter(file_name, raw_lines)
+ return linter.lint()
+
+
+def main():
+ # type: () -> int
+ """Execute Main Entry point."""
+ parser = argparse.ArgumentParser(description='MongoDB Simple C++ Linter.')
+
+ parser.add_argument('file', type=str, help="C++ input file")
+
+ parser.add_argument('-v', '--verbose', action='count', help="Enable verbose tracing")
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+
+ try:
+ error_count = lint_file(args.file)
+ if error_count != 0:
+ print('File "{}" failed with {} errors.'.format(args.file, error_count))
+ return 1
+ return 0
+ except Exception as ex: # pylint: disable=broad-except
+ print('Exception while checking file "{}": {}'.format(args.file, ex))
+ return 2
+
+
+if __name__ == '__main__':
+ sys.exit(main())
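For context on the proximity rule enforced by `_check_for_generic_fcv` and `_check_for_feature_flag_ignore_fcv` above: both keep a sorted list of the line numbers of qualifying comments and use `bisect` to locate the closest comment at or before the flagged line, then require it to fall within 10 lines. A minimal sketch of that check, with illustrative names that are not part of the patch:

```python
# Illustrative sketch of the bisect-based proximity check used by mongolint.py.
# `comment_lines` holds 0-based line numbers of qualifying comments, already sorted
# because the file is scanned top to bottom.
import bisect


def has_nearby_comment(comment_lines, ref_line, window=10):
    """Return True if a qualifying comment appears within `window` lines before ref_line."""
    i = bisect.bisect_right(comment_lines, ref_line)
    return i > 0 and comment_lines[i - 1] >= ref_line - window


print(has_nearby_comment([3, 40], 45))  # True: the comment on line 40 is within 10 lines
print(has_nearby_comment([3], 45))      # False: the nearest comment is 42 lines away
```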
diff --git a/buildscripts/linter/mypy.py b/buildscripts/linter/mypy.py
index b2ec8f6022eb2..eff8bbc50d191 100644
--- a/buildscripts/linter/mypy.py
+++ b/buildscripts/linter/mypy.py
@@ -16,7 +16,7 @@ def __init__(self):
"""Create a mypy linter."""
# User can override the location of mypy from an environment variable.
- super(MypyLinter, self).__init__("mypy", "0.580", os.getenv("MYPY"))
+ super(MypyLinter, self).__init__("mypy", "1.3.0", os.getenv("MYPY"))
def get_lint_version_cmd_args(self):
# type: () -> List[str]
diff --git a/buildscripts/linter/simplecpplint.py b/buildscripts/linter/simplecpplint.py
deleted file mode 100644
index 2f1ca2a6f9a67..0000000000000
--- a/buildscripts/linter/simplecpplint.py
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/usr/bin/env python3
-"""Simple C++ Linter."""
-
-import argparse
-import bisect
-import io
-import logging
-import re
-import sys
-
-
-def _make_polyfill_regex():
- polyfill_required_names = [
- '_',
- 'adopt_lock',
- 'async',
- 'chrono',
- 'condition_variable',
- 'condition_variable_any',
- 'cv_status',
- 'defer_lock',
- 'future',
- 'future_status',
- 'get_terminate',
- 'launch',
- 'lock_guard',
- 'mutex',
- 'notify_all_at_thread_exit',
- 'packaged_task',
- 'promise',
- 'recursive_mutex',
- 'set_terminate',
- 'shared_lock',
- 'shared_mutex',
- 'shared_timed_mutex',
- 'this_thread(?!::at_thread_exit)',
- 'thread',
- 'timed_mutex',
- 'try_to_lock',
- 'unique_lock',
- 'unordered_map',
- 'unordered_multimap',
- 'unordered_multiset',
- 'unordered_set',
- ]
-
- qualified_names = ['boost::' + name + "\\b" for name in polyfill_required_names]
- qualified_names.extend('std::' + name + "\\b" for name in polyfill_required_names)
- qualified_names_regex = '|'.join(qualified_names)
- return re.compile('(' + qualified_names_regex + ')')
-
-
-_RE_LINT = re.compile("//.*NOLINT")
-_RE_COMMENT_STRIP = re.compile("//.*")
-
-_RE_PATTERN_MONGO_POLYFILL = _make_polyfill_regex()
-_RE_UNSTRUCTURED_LOG = re.compile(r'\blogd\s*\(')
-_RE_COLLECTION_SHARDING_RUNTIME = re.compile(r'\bCollectionShardingRuntime\b')
-_RE_RAND = re.compile(r'\b(srand\(|rand\(\))')
-
-_RE_GENERIC_FCV_COMMENT = re.compile(r'\(Generic FCV reference\):')
-GENERIC_FCV = [
- r'::kLatest',
- r'::kLastContinuous',
- r'::kLastLTS',
- r'::kUpgradingFromLastLTSToLatest',
- r'::kUpgradingFromLastContinuousToLatest',
- r'::kDowngradingFromLatestToLastLTS',
- r'::kDowngradingFromLatestToLastContinuous',
- r'\.isUpgradingOrDowngrading',
- r'::kDowngradingFromLatestToLastContinuous',
- r'::kUpgradingFromLastLTSToLastContinuous',
-]
-_RE_GENERIC_FCV_REF = re.compile(r'(' + '|'.join(GENERIC_FCV) + r')\b')
-_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF = re.compile(r'isEnabledAndIgnoreFCVUnsafe\(\)')
-_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT = re.compile(r'\(Ignore FCV check\)')
-_RE_HEADER = re.compile(r'\.(h|hpp)$')
-
-_CXX_COMPAT_HEADERS = [
- "assert", "ctype", "errno", "fenv", "float", "inttypes", "limits", "locale", "math", "setjmp",
- "signal", "stdarg", "stddef", "stdint", "stdio", "stdlib", "string", "time", "uchar", "wchar",
- "wctype"
-]
-
-# Successful matches `m` have a `m["base"]`, the basename of the file that was included.
-_RE_CXX_COMPAT_HEADERS = re.compile(
- rf'# *include *((<)|("))(?P<base>{"|".join(_CXX_COMPAT_HEADERS)})\.h(?(2)>|")')
-
-
-class Linter:
- """Simple C++ Linter."""
-
- _license_header = '''\
-/**
- * Copyright (C) {year}-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */'''.splitlines()
-
- def __init__(self, file_name, raw_lines):
- """Create new linter."""
- self.file_name = file_name
- self.raw_lines = raw_lines
- self.clean_lines = []
- self.nolint_suppression = []
- self.generic_fcv_comments = []
- self.feature_flag_ignore_fcv_check_comments = []
- self._error_count = 0
-
- self.found_config_header = False
-
- def lint(self):
- """Run linter, returning error count."""
- # steps:
- # - Check for header
- # - Check for NOLINT and Strip multi line comments
- # - Run file-level checks
- # - Run per-line checks
-
- start_line = self._check_for_server_side_public_license()
-
- self._check_newlines()
- self._check_and_strip_comments()
-
- # File-level checks
- self._check_macro_definition_leaks()
-
- # Line-level checks
- for linenum in range(start_line, len(self.clean_lines)):
- if not self.clean_lines[linenum]:
- continue
-
- self._check_for_mongo_polyfill(linenum)
- self._check_for_mongo_unstructured_log(linenum)
- self._check_for_mongo_config_header(linenum)
- self._check_for_collection_sharding_runtime(linenum)
- self._check_for_rand(linenum)
- self._check_for_c_stdlib_headers(linenum)
-
- # Relax the rule of commenting generic FCV references for files directly related to FCV
- # implementations.
- if not "feature_compatibility_version" in self.file_name:
- self._check_for_generic_fcv(linenum)
-
- # Don't check feature_flag.h/cpp where the function is defined and test files.
- if not "feature_flag" in self.file_name and not "test" in self.file_name:
- self._check_for_feature_flag_ignore_fcv(linenum)
-
- return self._error_count
-
- def _check_newlines(self):
- """Check that each source file ends with a newline character."""
- if self.raw_lines and self.raw_lines[-1][-1:] != '\n':
- self._error(
- len(self.raw_lines), 'mongo/final_newline',
- 'Files must end with a newline character.')
-
- def _check_and_strip_comments(self):
- in_multi_line_comment = False
-
- for linenum in range(len(self.raw_lines)):
- clean_line = self.raw_lines[linenum]
-
- # Users can write NOLINT different ways
- # // NOLINT
- # // Some explanation NOLINT
- # so we need a regular expression
- if _RE_LINT.search(clean_line):
- self.nolint_suppression.append(linenum)
-
- if _RE_GENERIC_FCV_COMMENT.search(clean_line):
- self.generic_fcv_comments.append(linenum)
-
- if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT.search(clean_line):
- self.feature_flag_ignore_fcv_check_comments.append(linenum)
-
- if not in_multi_line_comment:
- if "/*" in clean_line and not "*/" in clean_line:
- in_multi_line_comment = True
- clean_line = ""
-
- # Trim comments - approximately
- # Note, this does not understand if // is in a string
- # i.e. it will think URLs are also comments but this should be good enough to find
- # violators of the coding convention
- if "//" in clean_line:
- clean_line = _RE_COMMENT_STRIP.sub("", clean_line)
- else:
- if "*/" in clean_line:
- in_multi_line_comment = False
-
- clean_line = ""
-
- self.clean_lines.append(clean_line)
-
- def _check_macro_definition_leaks(self):
- """Some header macros should appear in define/undef pairs."""
- if not _RE_HEADER.search(self.file_name):
- return
- # Naive check: doesn't consider `#if` scoping.
- # Assumes an #undef matches the nearest #define.
- for macro in ['MONGO_LOGV2_DEFAULT_COMPONENT']:
- re_define = re.compile(fr"^\s*#\s*define\s+{macro}\b")
- re_undef = re.compile(fr"^\s*#\s*undef\s+{macro}\b")
- def_line = None
- for idx, line in enumerate(self.clean_lines):
- if def_line is None:
- if re_define.match(line):
- def_line = idx
- else:
- if re_undef.match(line):
- def_line = None
- if def_line is not None:
- self._error(def_line, 'mongodb/undefmacro', f'Missing "#undef {macro}"')
-
- def _check_for_mongo_polyfill(self, linenum):
- line = self.clean_lines[linenum]
- match = _RE_PATTERN_MONGO_POLYFILL.search(line)
- if match:
- self._error(
- linenum, 'mongodb/polyfill',
- 'Illegal use of banned name from std::/boost:: for "%s", use mongo::stdx:: variant instead'
- % (match.group(0)))
-
- def _check_for_mongo_unstructured_log(self, linenum):
- line = self.clean_lines[linenum]
- if _RE_UNSTRUCTURED_LOG.search(line) or 'doUnstructuredLogImpl' in line:
- self._error(
- linenum, 'mongodb/unstructuredlog', 'Illegal use of unstructured logging, '
- 'this is only for local development use and should not be committed.')
-
- def _check_for_collection_sharding_runtime(self, linenum):
- line = self.clean_lines[linenum]
- if _RE_COLLECTION_SHARDING_RUNTIME.search(
- line
- ) and "/src/mongo/db/s/" not in self.file_name and "_test.cpp" not in self.file_name:
- self._error(
- linenum, 'mongodb/collection_sharding_runtime', 'Illegal use of '
- 'CollectionShardingRuntime outside of mongo/db/s/; use CollectionShardingState '
- 'instead; see src/mongo/db/s/collection_sharding_state.h for details.')
-
- def _check_for_rand(self, linenum):
- line = self.clean_lines[linenum]
- if _RE_RAND.search(line):
- self._error(linenum, 'mongodb/rand',
- 'Use of rand or srand, use <random> or PseudoRandom instead.')
-
- def _license_error(self, linenum, msg, category='legal/license'):
- style_url = 'https://github.com/mongodb/mongo/wiki/Server-Code-Style'
- self._error(linenum, category, '{} See {}'.format(msg, style_url))
- return (False, linenum)
-
- def _check_for_server_side_public_license(self):
- """Return the number of the line at which the check ended."""
- src_iter = (x.rstrip() for x in self.raw_lines)
- linenum = 0
- for linenum, lic_line in enumerate(self._license_header):
- src_line = next(src_iter, None)
- if src_line is None:
- self._license_error(linenum, 'Missing or incomplete license header.')
- return linenum
- lic_re = re.escape(lic_line).replace(r'\{year\}', r'\d{4}')
- if not re.fullmatch(lic_re, src_line):
- self._license_error(
- linenum, 'Incorrect license header.\n'
- ' Expected: "{}"\n'
- ' Received: "{}"\n'.format(lic_line, src_line))
- return linenum
-
- # Warn if SSPL appears in Enterprise code, which has a different license.
- expect_sspl_license = "enterprise" not in self.file_name
- if not expect_sspl_license:
- self._license_error(linenum,
- 'Incorrect license header found. Expected Enterprise license.',
- category='legal/enterprise_license')
- return linenum
- return linenum
-
- def _check_for_mongo_config_header(self, linenum):
- """Check for a config file."""
- if self.found_config_header:
- return
-
- line = self.clean_lines[linenum]
- self.found_config_header = line.startswith('#include "mongo/config.h"')
-
- if not self.found_config_header and "MONGO_CONFIG_" in line:
- self._error(linenum, 'build/config_h_include',
- 'MONGO_CONFIG define used without prior inclusion of config.h.')
-
- def _check_for_generic_fcv(self, linenum):
- line = self.clean_lines[linenum]
- if _RE_GENERIC_FCV_REF.search(line):
- # Find the first generic FCV comment preceding the current line.
- i = bisect.bisect_right(self.generic_fcv_comments, linenum)
- if not i or self.generic_fcv_comments[i - 1] < (linenum - 10):
- self._error(
- linenum, 'mongodb/fcv',
- 'Please add a comment containing "(Generic FCV reference):" within 10 lines ' +
- 'before the generic FCV reference.')
-
- def _check_for_c_stdlib_headers(self, linenum):
- line = self.clean_lines[linenum]
-
- if match := _RE_CXX_COMPAT_HEADERS.match(line):
- self._error(
- linenum, 'mongodb/headers',
- f"Prohibited include of C header '<{match['base']}.h>'. " \
- f"Include C++ header '' instead.")
-
- def _check_for_feature_flag_ignore_fcv(self, linenum):
- line = self.clean_lines[linenum]
- if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF.search(line):
- # Find the first ignore FCV check comment preceding the current line.
- i = bisect.bisect_right(self.feature_flag_ignore_fcv_check_comments, linenum)
- if not i or self.feature_flag_ignore_fcv_check_comments[i - 1] < (linenum - 10):
- self._error(
- linenum, 'mongodb/fcv',
- 'Please add a comment containing "(Ignore FCV check)":" within 10 lines ' +
- 'before the isEnabledAndIgnoreFCVUnsafe() function call explaining why ' +
- 'the FCV check is ignored.')
-
- def _error(self, linenum, category, message):
- if linenum in self.nolint_suppression:
- return
-
- norm_file_name = self.file_name.replace('\\', '/')
-
- # Custom clang-tidy check tests purposefully produce errors for
- # tests to find. They should be ignored.
- if "mongo_tidy_checks/tests/" in norm_file_name:
- return
-
- if category == "legal/license":
- # Enterprise module does not have the SSPL license
- if "enterprise" in self.file_name:
- return
-
- # The following files are in the src/mongo/ directory but technically belong
- # in src/third_party/ because their copyright does not belong to MongoDB.
- files_to_ignore = set([
- 'src/mongo/scripting/mozjs/PosixNSPR.cpp',
- 'src/mongo/shell/linenoise.cpp',
- 'src/mongo/shell/linenoise.h',
- 'src/mongo/shell/mk_wcwidth.cpp',
- 'src/mongo/shell/mk_wcwidth.h',
- 'src/mongo/util/md5.cpp',
- 'src/mongo/util/md5.h',
- 'src/mongo/util/md5main.cpp',
- 'src/mongo/util/net/ssl_stream.cpp',
- 'src/mongo/util/scopeguard.h',
- ])
-
- for file_to_ignore in files_to_ignore:
- if file_to_ignore in norm_file_name:
- return
-
- # We count internally from 0 but users count from 1 for line numbers
- print("Error: %s:%d - %s - %s" % (self.file_name, linenum + 1, category, message))
- self._error_count += 1
-
-
-def lint_file(file_name):
- """Lint file and print errors to console."""
- with io.open(file_name, encoding='utf-8') as file_stream:
- raw_lines = file_stream.readlines()
-
- linter = Linter(file_name, raw_lines)
- return linter.lint()
-
-
-def main():
- # type: () -> int
- """Execute Main Entry point."""
- parser = argparse.ArgumentParser(description='MongoDB Simple C++ Linter.')
-
- parser.add_argument('file', type=str, help="C++ input file")
-
- parser.add_argument('-v', '--verbose', action='count', help="Enable verbose tracing")
-
- args = parser.parse_args()
-
- if args.verbose:
- logging.basicConfig(level=logging.DEBUG)
-
- try:
- error_count = lint_file(args.file)
- if error_count != 0:
- print('File "{}" failed with {} errors.'.format(args.file, error_count))
- return 1
- return 0
- except Exception as ex: # pylint: disable=broad-except
- print('Exception while checking file "{}": {}'.format(args.file, ex))
- return 2
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/buildscripts/lldb/lldb_printers.py b/buildscripts/lldb/lldb_printers.py
index 0c7bfb2650f7d..0bdf74304b2e4 100644
--- a/buildscripts/lldb/lldb_printers.py
+++ b/buildscripts/lldb/lldb_printers.py
@@ -112,7 +112,7 @@ def StringDataPrinter(valobj, *_args): # pylint: disable=invalid-name
return 'nullptr'
size1 = valobj.GetChildMemberWithName("_size").GetValueAsUnsigned(0)
- return '"{}"'.format(valobj.GetProcess().ReadMemory(ptr, size1, lldb.SBError()).encode("utf-8"))
+ return '"{}"'.format(valobj.GetProcess().ReadMemory(ptr, size1, lldb.SBError()).decode("utf-8"))
def read_memory_as_hex(process, address, size):
diff --git a/buildscripts/package_test.py b/buildscripts/package_test.py
index 94a385519feba..b01d2280479ed 100644
--- a/buildscripts/package_test.py
+++ b/buildscripts/package_test.py
@@ -56,7 +56,7 @@
'amazon2': ('amazonlinux:2', "yum",
frozenset(["python", "python3", "wget", "pkgconfig", "systemd", "procps", "file"]),
"python3"),
- 'amazon2022': ('amazonlinux:2022', "yum",
+ 'amazon2023': ('amazonlinux:2023', "yum",
frozenset(
["python", "python3", "wget", "pkgconfig", "systemd", "procps", "file"]),
"python3"),
@@ -160,6 +160,7 @@ class Test:
python_command: str = dataclasses.field(default="", repr=False)
packages_urls: List[str] = dataclasses.field(default_factory=list)
packages_paths: List[Path] = dataclasses.field(default_factory=list)
+ attempts: int = dataclasses.field(default=0)
def __post_init__(self) -> None:
assert OS_DOCKER_LOOKUP[self.os_name] is not None
@@ -206,6 +207,20 @@ def join_commands(commands: List[str], sep: str = ' && ') -> str:
return sep.join(commands)
+def run_test_with_timeout(test: Test, client: DockerClient, timeout: int) -> Result:
+ start_time = time.time()
+ with futures.ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(run_test, test, client)
+ try:
+ result = future.result(timeout=timeout)
+ except futures.TimeoutError:
+ end_time = time.time()
+ logging.debug("Test %s timed out", test)
+ result = Result(status="fail", test_file=test.name(), start=start_time,
+ log_raw="test timed out", end=end_time, exit_code=1)
+ return result
+
+
def run_test(test: Test, client: DockerClient) -> Result:
result = Result(status="pass", test_file=test.name(), start=time.time(), log_raw="")
@@ -215,7 +230,6 @@ def run_test(test: Test, client: DockerClient) -> Result:
test_external_root = Path(__file__).parent.resolve()
logging.debug(test_external_root)
log_external_path = Path.joinpath(test_external_root, log_name)
-
commands: List[str] = ["export PYTHONIOENCODING=UTF-8"]
if test.os_name.startswith('rhel'):
@@ -307,10 +321,6 @@ def iterate_over_downloads() -> Generator[Dict[str, Any], None, None]:
def get_tools_package(arch_name: str, os_name: str) -> Optional[str]:
- # TODO: MONGOSH-1308 - we need to sub the arch alias until package
- # architectures are named consistently with the server packages
- if arch_name == "aarch64" and os_name != "amazon2":
- arch_name = "arm64"
for download in current_tools_releases["versions"][0]["downloads"]:
if download["name"] == os_name and download["arch"] == arch_name:
return download["package"]["url"]
@@ -318,6 +328,8 @@ def get_tools_package(arch_name: str, os_name: str) -> Optional[str]:
def get_mongosh_package(arch_name: str, os_name: str) -> Optional[str]:
+ # TODO: MONGOSH-1308 - we need to sub the arch alias until package
+ # architectures are named consistently with the server packages
if arch_name == "aarch64":
arch_name = "arm64"
if arch_name in ("x86_64", "amd64"):
@@ -361,6 +373,8 @@ def get_edition_alias(edition_name: str) -> str:
'Test packages on various hosts. This will spin up docker containers and test the installs.')
parser.add_argument("--arch", type=str, help="Arch of packages to test",
choices=["auto"] + list(arches), default="auto")
+parser.add_argument("-r", "--retries", type=int, help="Number of times to retry failed tests",
+ default=3)
subparsers = parser.add_subparsers(dest="command")
release_test_parser = subparsers.add_parser("release")
release_test_parser.add_argument(
@@ -530,17 +544,42 @@ def get_edition_alias(edition_name: str) -> str:
logging.warning("Skipping docker login")
report = Report(results=[], failures=0)
-with futures.ThreadPoolExecutor() as tpe:
- test_futures = [tpe.submit(run_test, test, docker_client) for test in tests]
- completed_tests = 0 # pylint: disable=invalid-name
- for f in futures.as_completed(test_futures):
- completed_tests += 1
- test_result = f.result()
- if test_result["exit_code"] != 0:
- report["failures"] += 1
-
- report["results"].append(test_result)
- logging.info("Completed %s/%s tests", completed_tests, len(test_futures))
+with futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as tpe:
+ # Set a 10 minute timeout for a single test
+ SINGLE_TEST_TIMEOUT = 10 * 60
+ test_futures = {
+ tpe.submit(run_test_with_timeout, test, docker_client, SINGLE_TEST_TIMEOUT): test
+ for test in tests
+ }
+ completed_tests: int = 0
+ retried_tests: int = 0
+ total_tests: int = len(tests)
+ while len(test_futures.keys()) > 0:
+ finished_futures, active_futures = futures.wait(test_futures.keys(), timeout=None,
+ return_when="FIRST_COMPLETED")
+ for f in finished_futures:
+ completed_test = test_futures.pop(f)
+ test_result = f.result()
+ if test_result["exit_code"] != 0:
+ if completed_test.attempts < args.retries:
+ retried_tests += 1
+ completed_test.attempts += 1
+ test_futures[tpe.submit(run_test, completed_test,
+ docker_client)] = completed_test
+ continue
+ report["failures"] += 1
+
+ completed_tests += 1
+ report["results"].append(test_result)
+
+ logging.info(
+ "Completed %s tests, retried %s tests, total %s tests, %s tests are in progress.",
+ completed_tests, retried_tests, total_tests, len(test_futures))
+
+ # Log the tests still in progress to help diagnose hangs. To limit log volume,
+ # we only do this after a test completes.
+ for active_test in test_futures.values():
+ logging.info("Test in progress: %s", active_test)
with open("report.json", "w") as fh:
json.dump(report, fh)
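The `run_test_with_timeout` helper added above bounds how long the harness waits on one test by submitting the work to a single-worker executor and calling `future.result(timeout=...)`. A hedged, self-contained sketch of that pattern follows; the function names are illustrative and the caveat applies to the patch as well: timing out the wait does not cancel the work already running.

```python
# Minimal sketch of the timeout pattern used by run_test_with_timeout: bound the wait
# with future.result(timeout=...). A TimeoutError only stops the wait; the submitted
# work keeps running, and leaving the `with` block still waits for the worker thread
# to finish before the function returns.
from concurrent import futures
import time


def slow_task(seconds):
    time.sleep(seconds)
    return "done"


def run_with_timeout(seconds, timeout):
    with futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(slow_task, seconds)
        try:
            return future.result(timeout=timeout)
        except futures.TimeoutError:
            return "timed out"


print(run_with_timeout(1, 2))  # "done"
print(run_with_timeout(2, 1))  # "timed out" (returned after the executor finishes shutting down)
```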
diff --git a/buildscripts/package_test_internal.py b/buildscripts/package_test_internal.py
index b923a0e6f7714..43d7af8b4296a 100644
--- a/buildscripts/package_test_internal.py
+++ b/buildscripts/package_test_internal.py
@@ -259,23 +259,6 @@ def get_package_name(package_file: str) -> str:
def setup(test_args: TestArgs):
logging.info("Setting up test environment.")
- # TODO SERVER-70425: We can remove these once we have figured out why
- # packager.py sometimes uses distro files from older revisions.
- # Remove the PIDFile, PermissionsStartOnly, and Type configurations from
- # the systemd service file because they are not needed for simple-type
- # (non-forking) services and confuse the systemd emulator script.
- run_and_log("sed -Ei '/^PIDFile=|PermissionsStartOnly=|Type=/d' {}/mongod.service".format(
- test_args["systemd_units_dir"]))
- # Ensure RuntimeDirectory has been added to the systemd unit file.
- run_and_log("sed -Ei '/^ExecStart=.*/a RuntimeDirectory=mongodb' {}/mongod.service".format(
- test_args["systemd_units_dir"]))
- # Remove the journal: line (and the next) from mongod.conf, which is a
- # removed configuration. The Debian version of the config never got updated.
- run_and_log("sed -i '/journal:/,+1d' /etc/mongod.conf")
- # Remove fork: and pidFilePath: from mongod.conf because we want mongod to be
- # a non-forking service under systemd.
- run_and_log("sed -Ei '/fork:|pidFilePath:/d' /etc/mongod.conf")
-
# Ensure systemd doesn't try to start anything automatically so we can do
# it in our tests
run_and_log("mkdir -p /run/systemd/system")
@@ -332,6 +315,7 @@ def test_install_is_complete(test_args: TestArgs):
required_dirs = [
pathlib.Path('/run/mongodb'),
+ pathlib.Path('/var/run/mongodb'),
pathlib.Path(test_args['mongo_work_dir']),
] # type: List[pathlib.Path]
diff --git a/buildscripts/packager.py b/buildscripts/packager.py
index 98ab952d619e4..3c08420cd229d 100755
--- a/buildscripts/packager.py
+++ b/buildscripts/packager.py
@@ -31,6 +31,7 @@
import argparse
import errno
+import git
from glob import glob
import os
import re
@@ -44,7 +45,7 @@
ARCH_CHOICES = ["x86_64", "arm64", "aarch64", "s390x"]
# Made up names for the flavors of distribution we package for.
-DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2022"]
+DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2023"]
class Spec(object):
@@ -289,8 +290,8 @@ def repo_os_version(self, build_os):
return "2013.03"
elif self.dname == 'amazon2':
return "2017.12"
- elif self.dname == 'amazon2022':
- return "2022.0"
+ elif self.dname == 'amazon2023':
+ return "2023.0"
elif self.dname == 'ubuntu':
if build_os == 'ubuntu1204':
return "precise"
@@ -353,7 +354,7 @@ def build_os(self, arch):
"rhel55",
"rhel67",
]
- elif self.dname in ['amazon', 'amazon2', 'amazon2022']:
+ elif self.dname in ['amazon', 'amazon2', 'amazon2023']:
return [self.dname]
elif self.dname == 'ubuntu':
return [
@@ -381,8 +382,8 @@ def release_dist(self, build_os):
return 'amzn1'
elif self.dname == 'amazon2':
return 'amzn2'
- elif self.dname == 'amazon2022':
- return 'amzn2022'
+ elif self.dname == 'amazon2023':
+ return 'amzn2023'
return re.sub(r'^rh(el\d).*$', r'\1', build_os)
@@ -750,10 +751,31 @@ def write_debian_changelog(path, spec, srcdir):
os.chdir(srcdir)
preamble = ""
try:
+
+ git_repo = git.Repo(srcdir)
+ # get the original HEAD position of repo
+ head_commit_sha = git_repo.head.object.hexsha
+
+ # add and commit the uncommitted changes
+ print("Committing uncommitted changes")
+ git_repo.git.add(all=True)
+ # only commit changes if there are any
+ if len(git_repo.index.diff("HEAD")) != 0:
+ with git_repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen",
+ GIT_COMMITTER_EMAIL="evergreen@mongodb.com"):
+ git_repo.git.commit("--author='Evergreen <>'", "-m", "temp commit")
+
+ # original command to preserve functionality
+ # FIXME: make consistent with the rest of the code when we have more packaging testing
+ print("Getting changelog for specified gitspec:", spec.metadata_gitspec())
sb = preamble + backtick([
"sh", "-c",
"git archive %s debian/changelog | tar xOf -" % spec.metadata_gitspec()
]).decode('utf-8')
+
+ # reset branch to original state
+ print("Resetting branch to original state")
+ git_repo.git.reset("--mixed", head_commit_sha)
finally:
os.chdir(oldcwd)
lines = sb.split("\n")
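Both this change and the matching one in packager_enterprise.py below wrap a `git archive <gitspec>` call in a temporary commit so that uncommitted working-tree changes are visible to the archive, then restore the branch with a mixed reset. A hedged sketch of that GitPython pattern is shown here; the helper name and the try/finally placement of the reset are illustrative, not part of the patch:

```python
# Sketch of the temporary-commit pattern: snapshot HEAD, commit any pending changes
# under an "Evergreen" committer identity, run the work that needs them committed
# (e.g. `git archive`), then reset --mixed back so the branch and index return to
# their original state while the files remain in the working tree.
import git


def with_temp_commit(srcdir, work):
    repo = git.Repo(srcdir)
    head_sha = repo.head.object.hexsha          # remember where the branch was
    repo.git.add(all=True)                      # stage everything, tracked or untracked
    if len(repo.index.diff("HEAD")) != 0:       # only commit if something actually changed
        with repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen",
                                         GIT_COMMITTER_EMAIL="evergreen@mongodb.com"):
            repo.git.commit("--author='Evergreen <>'", "-m", "temp commit")
    try:
        work(repo)                              # e.g. run the `git archive ...` pipeline
    finally:
        repo.git.reset("--mixed", head_sha)     # drop the temp commit, keep the files
```

Placing the reset in a finally block (unlike the patch, which resets after the archive succeeds) would also restore the branch if the archive step throws; that is a design choice, not what the patch does.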
diff --git a/buildscripts/packager_enterprise.py b/buildscripts/packager_enterprise.py
index ea89dc94633e8..c4f1c2417fa74 100755
--- a/buildscripts/packager_enterprise.py
+++ b/buildscripts/packager_enterprise.py
@@ -28,6 +28,7 @@
# echo "Now put the dist gnupg signing keys in ~root/.gnupg"
import errno
+import git
from glob import glob
import os
import re
@@ -44,7 +45,7 @@
ARCH_CHOICES = ["x86_64", "ppc64le", "s390x", "arm64", "aarch64"]
# Made up names for the flavors of distribution we package for.
-DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2022"]
+DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2023"]
class EnterpriseSpec(packager.Spec):
@@ -144,8 +145,8 @@ def build_os(self, arch):
return ["rhel82", "rhel90"]
if self.dname == 'amazon2':
return ["amazon2"]
- if self.dname == 'amazon2022':
- return ["amazon2022"]
+ if self.dname == 'amazon2023':
+ return ["amazon2023"]
return []
if re.search("(redhat|fedora|centos)", self.dname):
@@ -251,12 +252,33 @@ def make_package(distro, build_os, arch, spec, srcdir):
# innocuous in the debianoids' sdirs).
for pkgdir in ["debian", "rpm"]:
print("Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir))
+ git_repo = git.Repo(srcdir)
+ # get the original HEAD position of repo
+ head_commit_sha = git_repo.head.object.hexsha
+
+ # add and commit the uncommitted changes
+ print("Committing uncommitted changes")
+ git_repo.git.add(all=True)
+ # only commit changes if there are any
+ if len(git_repo.index.diff("HEAD")) != 0:
+ with git_repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen",
+ GIT_COMMITTER_EMAIL="evergreen@mongodb.com"):
+ git_repo.git.commit("--author='Evergreen <>'", "-m", "temp commit")
+
+ # original command to preserve functionality
+ # FIXME: make consistent with the rest of the code when we have more packaging testing
# FIXME: sh-dash-cee is bad. See if tarfile can do this.
+ print("Copying packaging files from specified gitspec:", spec.metadata_gitspec())
packager.sysassert([
"sh", "-c",
"(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" %
(srcdir, spec.metadata_gitspec(), pkgdir, sdir)
])
+
+ # reset branch to original state
+ print("Resetting branch to original state")
+ git_repo.git.reset("--mixed", head_commit_sha)
+
# Splat the binaries under sdir. The "build" stages of the
# packaging infrastructure will move the files to wherever they
# need to go.
diff --git a/buildscripts/patch_builds/change_data.py b/buildscripts/patch_builds/change_data.py
index 44af0fca70b21..0d7fee884968b 100644
--- a/buildscripts/patch_builds/change_data.py
+++ b/buildscripts/patch_builds/change_data.py
@@ -87,7 +87,7 @@ def find_changed_files(repo: Repo, revision_map: Optional[RevisionMap] = None) -
work_tree_files = _modified_files_for_diff(diff, LOGGER.bind(diff="working tree diff"))
commit = repo.index
- diff = commit.diff(revision_map.get(repo.git_dir, repo.head.commit))
+ diff = commit.diff(revision_map.get(repo.git_dir, repo.head.commit), R=True)
index_files = _modified_files_for_diff(diff, LOGGER.bind(diff="index diff"))
untracked_files = set(repo.untracked_files)
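The `R=True` argument added above reverses the direction in which GitPython reports the index-to-commit comparison, so added and deleted statuses come out from the intended side. A small, hedged illustration of calling the API both ways so the flipped reporting can be observed; the repository path is illustrative:

```python
# Hedged illustration of diff(..., R=True): run the same index-vs-HEAD comparison
# with and without the flag and print how each change is reported. The direction
# of new_file/deleted_file statuses differs between the two loops.
from git import Repo

repo = Repo(".")  # illustrative path; any repo with staged changes will do
head = repo.head.commit

for d in repo.index.diff(head):
    print("default: ", d.change_type, d.a_path, "->", d.b_path)

for d in repo.index.diff(head, R=True):
    print("reversed:", d.change_type, d.a_path, "->", d.b_path)
```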
diff --git a/buildscripts/quickcpplint.py b/buildscripts/quickcpplint.py
deleted file mode 100755
index 8d78a9c9d5c66..0000000000000
--- a/buildscripts/quickcpplint.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python3
-"""Extensible script to run one or more simple C++ Linters across a subset of files in parallel."""
-
-import argparse
-import logging
-import os
-import re
-import sys
-import threading
-from typing import List
-
-# Get relative imports to work when the package is not installed on the PYTHONPATH.
-if __name__ == "__main__" and __package__ is None:
- sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
-
-from buildscripts.linter import git # pylint: disable=wrong-import-position
-from buildscripts.linter import parallel # pylint: disable=wrong-import-position
-from buildscripts.linter import simplecpplint # pylint: disable=wrong-import-position
-
-FILES_RE = re.compile('\\.(h|cpp)$')
-
-
-def is_interesting_file(file_name: str) -> bool:
- """Return true if this file should be checked."""
- return (file_name.startswith("jstests")
- or file_name.startswith("src") and not file_name.startswith("src/third_party/")
- and not file_name.startswith("src/mongo/gotools/")
- # TODO SERVER-49805: These files should be generated at compile time.
- and not file_name == "src/mongo/db/cst/parser_gen.cpp") and FILES_RE.search(file_name)
-
-
-def _lint_files(file_names: List[str]) -> None:
- """Lint a list of files with clang-format."""
- run_lint1 = lambda param1: simplecpplint.lint_file(param1) == 0
- if not parallel.parallel_process([os.path.abspath(f) for f in file_names], run_lint1):
- print("ERROR: Code Style does not match coding style")
- sys.exit(1)
-
-
-def lint_patch(file_name: str) -> None:
- """Lint patch command entry point."""
- file_names = git.get_files_to_check_from_patch(file_name, is_interesting_file)
-
- # Patch may have files that we do not want to check which is fine
- if file_names:
- _lint_files(file_names)
-
-
-def lint(file_names: List[str]) -> None:
- # type: (str, Dict[str, str], List[str]) -> None
- """Lint files command entry point."""
- all_file_names = git.get_files_to_check(file_names, is_interesting_file)
-
- _lint_files(all_file_names)
-
-
-def lint_all(file_names: List[str]) -> None:
- # pylint: disable=unused-argument
- """Lint files command entry point based on working tree."""
- all_file_names = git.get_files_to_check_working_tree(is_interesting_file)
-
- _lint_files(all_file_names)
-
-
-def lint_my(origin_branch: List[str]) -> None:
- """Lint files command based on local changes."""
- files = git.get_my_files_to_check(is_interesting_file, origin_branch)
- files = [f for f in files if os.path.exists(f)]
-
- _lint_files(files)
-
-
-def main() -> None:
- """Execute Main entry point."""
-
- parser = argparse.ArgumentParser(description='Quick C++ Lint frontend.')
-
- parser.add_argument('-v', "--verbose", action='store_true', help="Enable verbose logging")
-
- sub = parser.add_subparsers(title="Linter subcommands", help="sub-command help")
-
- parser_lint = sub.add_parser('lint', help='Lint only Git files')
- parser_lint.add_argument("file_names", nargs="*", help="Globs of files to check")
- parser_lint.set_defaults(func=lint)
-
- parser_lint_all = sub.add_parser('lint-all', help='Lint All files')
- parser_lint_all.add_argument("file_names", nargs="*", help="Globs of files to check")
- parser_lint_all.set_defaults(func=lint_all)
-
- parser_lint_patch = sub.add_parser('lint-patch', help='Lint the files in a patch')
- parser_lint_patch.add_argument("file_names", nargs="*", help="Globs of files to check")
- parser_lint_patch.set_defaults(func=lint_patch)
-
- parser_lint_my = sub.add_parser('lint-my', help='Lint my files')
- parser_lint_my.add_argument("--branch", dest="file_names", default="origin/master",
- help="Branch to compare against")
- parser_lint_my.set_defaults(func=lint_my)
-
- args = parser.parse_args()
-
- if args.verbose:
- logging.basicConfig(level=logging.DEBUG)
-
- args.func(args.file_names)
-
-
-if __name__ == "__main__":
- main()
diff --git a/buildscripts/quickmongolint.py b/buildscripts/quickmongolint.py
new file mode 100755
index 0000000000000..53d3461df6ee9
--- /dev/null
+++ b/buildscripts/quickmongolint.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+"""Extensible script to run one or more simple C++ Linters across a subset of files in parallel."""
+
+import argparse
+import logging
+import os
+import re
+import sys
+import threading
+from typing import List
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
+
+from buildscripts.linter import git # pylint: disable=wrong-import-position
+from buildscripts.linter import parallel # pylint: disable=wrong-import-position
+from buildscripts.linter import mongolint # pylint: disable=wrong-import-position
+
+FILES_RE = re.compile('\\.(h|cpp)$')
+
+
+def is_interesting_file(file_name: str) -> bool:
+ """Return true if this file should be checked."""
+ return (file_name.startswith("jstests")
+ or file_name.startswith("src") and not file_name.startswith("src/third_party/")
+ and not file_name.startswith("src/mongo/gotools/")
+ and not file_name.startswith("src/streams/third_party")
+ # TODO SERVER-49805: These files should be generated at compile time.
+ and not file_name == "src/mongo/db/cst/parser_gen.cpp") and FILES_RE.search(file_name)
+
+
+def _lint_files(file_names: List[str]) -> None:
+ """Lint a list of files with clang-format."""
+ run_lint1 = lambda param1: mongolint.lint_file(param1) == 0
+ if not parallel.parallel_process([os.path.abspath(f) for f in file_names], run_lint1):
+ print("ERROR: Code Style does not match coding style")
+ sys.exit(1)
+
+
+def lint_patch(file_name: str) -> None:
+ """Lint patch command entry point."""
+ file_names = git.get_files_to_check_from_patch(file_name, is_interesting_file)
+
+ # Patch may have files that we do not want to check which is fine
+ if file_names:
+ _lint_files(file_names)
+
+
+def lint(file_names: List[str]) -> None:
+ # type: (List[str]) -> None
+ """Lint files command entry point."""
+ all_file_names = git.get_files_to_check(file_names, is_interesting_file)
+
+ _lint_files(all_file_names)
+
+
+def lint_all(file_names: List[str]) -> None:
+ # pylint: disable=unused-argument
+ """Lint files command entry point based on working tree."""
+ all_file_names = git.get_files_to_check_working_tree(is_interesting_file)
+
+ _lint_files(all_file_names)
+
+
+def lint_my(origin_branch: List[str]) -> None:
+ """Lint files command based on local changes."""
+ files = git.get_my_files_to_check(is_interesting_file, origin_branch)
+ files = [f for f in files if os.path.exists(f)]
+
+ _lint_files(files)
+
+
+def main() -> None:
+ """Execute Main entry point."""
+
+ parser = argparse.ArgumentParser(description='Quick C++ Lint frontend.')
+
+ parser.add_argument('-v', "--verbose", action='store_true', help="Enable verbose logging")
+
+ sub = parser.add_subparsers(title="Linter subcommands", help="sub-command help")
+
+ parser_lint = sub.add_parser('lint', help='Lint only Git files')
+ parser_lint.add_argument("file_names", nargs="*", help="Globs of files to check")
+ parser_lint.set_defaults(func=lint)
+
+ parser_lint_all = sub.add_parser('lint-all', help='Lint All files')
+ parser_lint_all.add_argument("file_names", nargs="*", help="Globs of files to check")
+ parser_lint_all.set_defaults(func=lint_all)
+
+ parser_lint_patch = sub.add_parser('lint-patch', help='Lint the files in a patch')
+ parser_lint_patch.add_argument("file_names", nargs="*", help="Globs of files to check")
+ parser_lint_patch.set_defaults(func=lint_patch)
+
+ parser_lint_my = sub.add_parser('lint-my', help='Lint my files')
+ parser_lint_my.add_argument("--branch", dest="file_names", default="origin/master",
+ help="Branch to compare against")
+ parser_lint_my.set_defaults(func=lint_my)
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+
+ args.func(args.file_names)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/buildscripts/resmoke_proxy/resmoke_proxy.py b/buildscripts/resmoke_proxy/resmoke_proxy.py
index 42ceeb6c7575d..0074a47c23494 100644
--- a/buildscripts/resmoke_proxy/resmoke_proxy.py
+++ b/buildscripts/resmoke_proxy/resmoke_proxy.py
@@ -41,4 +41,4 @@ def read_suite_config(self, suite_name: str) -> Dict[str, Any]:
:param suite_name: Name of suite to read.
:return: Configuration of specified suite.
"""
- return self._suite_config.SuiteFinder.get_config_obj(suite_name)
+ return self._suite_config.SuiteFinder.get_config_obj_no_verify(suite_name)
diff --git a/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml b/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml
index 31387cd70548a..05248400df4c6 100644
--- a/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml
+++ b/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml
@@ -40,10 +40,3 @@ cqf_parallel: |-
parallel. The optimizer will use a default degree of parallelism of 5. Tests
in this suite are _forced_ to use the new query optimizer by using the server
parameter 'internalQueryFrameworkControl': "forceBonsai".
-
-cqf_passthrough: |-
- A passthrough suite of the tests in the core suite, but attempting usage of
- the new optimizer and the CQF framework using the server parameter
- internalQueryFrameworkControl: "tryBonsai". Queries that we believe should be
- able to correctly use the new optimizer will be routed via that path and
- should return the same results.
diff --git a/buildscripts/resmokeconfig/feature_flag_test.idl b/buildscripts/resmokeconfig/feature_flag_test.idl
new file mode 100644
index 0000000000000..763b0f4c8bcb6
--- /dev/null
+++ b/buildscripts/resmokeconfig/feature_flag_test.idl
@@ -0,0 +1,8 @@
+# This file is meant to add any feature flags needed for testing to
+# all_feature_flags.txt
+feature_flags:
+ featureFlagToaster:
+ description: "Create a feature flag"
+ cpp_varname: gFeatureFlagToaster
+ default: false
+ shouldBeFCVGated: true
diff --git a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
index ab21c338c8143..3a89f933deb2c 100644
--- a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
+++ b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
@@ -1,10 +1,12 @@
# Feature flags here are turned off even on the "all feature flags" build variants.
+# Feature flags here disable jstests that are tagged with these feature flags on all variants.
#
# These flags can be enabled on a per-task or per-build-variant basis
# by modifying their respective definitions in evergreen.yml.
- featureFlagFryer
- featureFlagCommonQueryFramework
+- featureFlagSearchInSbe
# This flag exists to help users in managed environments that upgraded to 6.0 before 6.0.0-rc8 was
# released create the transactions collection index and is only meant to be enabled adhoc, so only
# its targeted tests should enable it.
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml
new file mode 100644
index 0000000000000..dafecc971b156
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml
@@ -0,0 +1,26 @@
+##########################################################
+# THIS IS A GENERATED FILE -- DO NOT MODIFY.
+# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
+# AND REGENERATE THE MATRIX SUITES.
+#
+# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml
+# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
+##########################################################
+executor:
+ archive:
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ clusterType: standalone
+ internalQueryAppendIdToSetWindowFieldsSort: true
+ internalQueryMaxAllowedDensifyDocs: 1000
+ traceExceptions: false
+ useRandomBinVersionsWithinReplicaSet: last-continuous
+ nodb: ''
+matrix_suite: true
+selector:
+ roots:
+ - jstestfuzz/out/*.js
+test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml
new file mode 100644
index 0000000000000..a56d13ced99e4
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml
@@ -0,0 +1,26 @@
+##########################################################
+# THIS IS A GENERATED FILE -- DO NOT MODIFY.
+# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
+# AND REGENERATE THE MATRIX SUITES.
+#
+# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml
+# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
+##########################################################
+executor:
+ archive:
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ clusterType: standalone
+ internalQueryAppendIdToSetWindowFieldsSort: true
+ internalQueryMaxAllowedDensifyDocs: 1000
+ traceExceptions: false
+ useRandomBinVersionsWithinReplicaSet: last-lts
+ nodb: ''
+matrix_suite: true
+selector:
+ roots:
+ - jstestfuzz/out/*.js
+test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml
new file mode 100644
index 0000000000000..76da7c376d82f
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml
@@ -0,0 +1,26 @@
+##########################################################
+# THIS IS A GENERATED FILE -- DO NOT MODIFY.
+# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
+# AND REGENERATE THE MATRIX SUITES.
+#
+# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml
+# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
+##########################################################
+executor:
+ archive:
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ clusterType: standalone
+ internalQueryAppendIdToSetWindowFieldsSort: true
+ internalQueryMaxAllowedDensifyDocs: 1000
+ traceExceptions: false
+ useRandomBinVersionsWithinReplicaSet: last-continuous
+ nodb: ''
+matrix_suite: true
+selector:
+ roots:
+ - jstestfuzz/out/*.js
+test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml
new file mode 100644
index 0000000000000..d96066d3f3c89
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml
@@ -0,0 +1,26 @@
+##########################################################
+# THIS IS A GENERATED FILE -- DO NOT MODIFY.
+# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
+# AND REGENERATE THE MATRIX SUITES.
+#
+# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml
+# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
+##########################################################
+executor:
+ archive:
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ clusterType: standalone
+ internalQueryAppendIdToSetWindowFieldsSort: true
+ internalQueryMaxAllowedDensifyDocs: 1000
+ traceExceptions: false
+ useRandomBinVersionsWithinReplicaSet: last-lts
+ nodb: ''
+matrix_suite: true
+selector:
+ roots:
+ - jstestfuzz/out/*.js
+test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml
index 7c47c5e43b735..727c4fe27fb6d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml
@@ -14,8 +14,8 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
global_vars:
@@ -67,6 +67,7 @@ executor:
global_vars:
TestData:
checkCollectionCounts: true
+ - class: CheckOrphansDeleted
matrix_suite: true
selector:
exclude_files:
@@ -264,7 +265,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- uses_parallel_shell
- requires_profiling
- requires_capped
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml
index 8d61f758dc6ef..33dce07ddfd08 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml
@@ -14,8 +14,8 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
global_vars:
@@ -67,6 +67,7 @@ executor:
global_vars:
TestData:
checkCollectionCounts: true
+ - class: CheckOrphansDeleted
matrix_suite: true
selector:
exclude_files:
@@ -264,7 +265,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- uses_parallel_shell
- requires_profiling
- requires_capped
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml
index 656a7bf7ce6ea..60137d4a6b5b4 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml
index 041668ff85c98..73dd55ef97d21 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml
index 33c5cce1bdc36..05564943ca2e9 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml
index ecc72c60c50b2..08fe65930071a 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml
index 676fd1aa10449..fbf521649bc65 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml
index 55b87cc488f53..fc6931e5cc5c5 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml
@@ -14,7 +14,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml
index ab7e555698e38..108709aeddd25 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml
index 036057bca395f..877576cec07f1 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml
index 08f6e22b50c16..0d1225031554a 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml
index c7332c0994dd1..121a2c86c0bfd 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml
index 8f3c7d4832f88..a51fe721cb6aa 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml
index 4eb5c298b3c3c..82e43b5019eff 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml
index 03918906052fd..15d266e0e9e41 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml
index 5e807bebd0609..05d7c1b13f3f6 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml
index ddf91d4b209e7..cf00d0918ee1e 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml
index 246163ed62dfb..8c0562accab8b 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml
index de8dbcb712499..86e726388497d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml
index 8606ce9dd7df4..ecd9d919bac99 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');
global_vars:
TestData:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml
index 7e623dcce0894..cac632e5fa6c0 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml
index ecde23d670d97..40bb6ecebe8a9 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml
@@ -13,7 +13,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');;
load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml
index 4837221913ccf..e11d04b36d3bb 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_continuous
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml
index 14d2327e9de20..295e825e543ad 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_continuous
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml
index e1c4ab6f997b6..afb80f07cf140 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_continuous
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml
index 33daa228cc190..3394b06632435 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_lts
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml
index c4b70373bf7d7..0ccf44385ad72 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_lts
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml
index 287fc21d3c303..c7677f3a944fd 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml
@@ -23,6 +23,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
num_nodes: 3
old_bin_version: last_lts
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml
index 1ceb5f6c3db7c..efa47665c93aa 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml
@@ -26,11 +26,12 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
mongos_options:
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
num_mongos: 2
num_rs_nodes_per_shard: 2
num_shards: 2
@@ -39,12 +40,12 @@ executor:
mongod_options:
oplogSize: 1024
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections
- class: CleanupConcurrencyWorkloads
matrix_suite: true
@@ -81,11 +82,13 @@ selector:
- jstests/concurrency/fsm_workloads/yield_and_hashed.js
- jstests/concurrency/fsm_workloads/yield_and_sorted.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
- jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
- jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
- jstests/concurrency/fsm_workloads/create_collection_and_view.js
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
exclude_with_any_tags:
- requires_replication
- assumes_balancer_on
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml
index d2f377dae017c..826c8a8d15128 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml
@@ -26,11 +26,12 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
+ queryAnalysisWriterIntervalSecs: 1
roleGraphInvalidationIsFatal: 1
mongos_options:
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
num_mongos: 2
num_rs_nodes_per_shard: 2
num_shards: 2
@@ -39,12 +40,12 @@ executor:
mongod_options:
oplogSize: 1024
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections
- class: CleanupConcurrencyWorkloads
matrix_suite: true
@@ -81,11 +82,13 @@ selector:
- jstests/concurrency/fsm_workloads/yield_and_hashed.js
- jstests/concurrency/fsm_workloads/yield_and_sorted.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
- jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
- jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
- jstests/concurrency/fsm_workloads/create_collection_and_view.js
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
exclude_with_any_tags:
- requires_replication
- assumes_balancer_on
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml
index e356846269ad8..f5e86ccf17926 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml
@@ -35,6 +35,7 @@ matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/**/*.js
+ - jstests/core/queryable_encryption/**/*.js
exclude_with_any_tags:
- does_not_support_encrypted_storage_engine
roots:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml
index d4d1df4da3441..75094c6e83845 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml
@@ -42,6 +42,7 @@ matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/**/*.js
+ - jstests/core/queryable_encryption/**/*.js
- jstests/core/**/profile1.js
- jstests/core/**/profile2.js
- jstests/core/**/find9.js
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml
index 44cfc82076c57..0bc508270977d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml
@@ -36,6 +36,7 @@ matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/**/*.js
+ - jstests/core/queryable_encryption/**/*.js
- jstests/core/**/profile1.js
- jstests/core/**/profile2.js
- jstests/core/**/find9.js
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml
index 3254155d4a44d..c455f4f1e78c4 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml
@@ -33,6 +33,7 @@ matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/**/*.js
+ - jstests/core/queryable_encryption/**/*.js
exclude_with_any_tags:
- does_not_support_repeated_reads
- requires_profiling
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml
deleted file mode 100644
index 12e35d0acd72c..0000000000000
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##########################################################
-# THIS IS A GENERATED FILE -- DO NOT MODIFY.
-# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
-# AND REGENERATE THE MATRIX SUITES.
-#
-# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml
-# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
-##########################################################
-executor:
- archive:
- tests: true
- config:
- shell_options:
- global_vars:
- TestData:
- clusterType: standalone
- internalQueryAppendIdToSetWindowFieldsSort: true
- internalQueryMaxAllowedDensifyDocs: 1000
- traceExceptions: false
- useRandomBinVersionsWithinReplicaSet: last-continuous
- nodb: ''
-matrix_suite: true
-selector:
- roots:
- - jstestfuzz/out/*.js
-test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml
deleted file mode 100644
index 6dd4fbd42d072..0000000000000
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-##########################################################
-# THIS IS A GENERATED FILE -- DO NOT MODIFY.
-# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
-# AND REGENERATE THE MATRIX SUITES.
-#
-# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml
-# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
-##########################################################
-executor:
- archive:
- tests: true
- config:
- shell_options:
- global_vars:
- TestData:
- clusterType: standalone
- internalQueryAppendIdToSetWindowFieldsSort: true
- internalQueryMaxAllowedDensifyDocs: 1000
- traceExceptions: false
- useRandomBinVersionsWithinReplicaSet: last-lts
- nodb: ''
-matrix_suite: true
-selector:
- roots:
- - jstestfuzz/out/*.js
-test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml
index e66071c2edbff..69b4e10574dc0 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml
@@ -26,7 +26,6 @@ selector:
exclude_files:
- jstests/multiVersion/libs/*.js
- jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
- jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
- jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml
index ad0315a7904d0..027e53ed8713d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml
@@ -19,7 +19,6 @@ selector:
- jstests/multiVersion/libs/*.js
- jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js
- jstests/multiVersion/targetedTestsLastLtsFeatures/*.js
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
- jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js
exclude_with_any_tags:
@@ -28,4 +27,5 @@ selector:
roots:
- jstests/multiVersion/**/*.js
- src/mongo/db/modules/*/jstests/hot_backups/multiVersion/*.js
+ - src/mongo/db/modules/*/jstests/audit/multiVersion/*.js
test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml
index ac95ff9541252..4e4dcef6d1d06 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mongod_options:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml
index 877a942d4e40c..a92d992343b3a 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_new_old
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml
index b67ec92d8bc7e..8eb95ae671da2 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_old_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml
index efca99bdd3993..d02aaaae8f9d2 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: old_new_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml
index 01a7b6b18bf76..608d3b6f8cdb8 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_new_old
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml
index 3432c5dc7894f..4c117a655456b 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_old_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml
index b3bb9d3e5d0f3..7dc4e7ed24c48 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: old_new_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml
index 7a284924c02c5..9f490b1336d73 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_new_old
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml
index 2625d13d68e79..2033ca4fe566d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_old_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml
index 0e1ec0d866302..1d365c4581e13 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: old_new_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml
index c0cc6f9692ac5..d95c4576ba254 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_new_old
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml
index 1e486636b9bd1..68c5ca18a1821 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: new_old_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml
index 9d3429adf3ac9..5066c58c90d5c 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml
@@ -16,7 +16,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: testingReplication = true;
+ eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mixed_bin_versions: old_new_new
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml
index d9f2b8d04af6d..2819fdd620c47 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -111,7 +111,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
roots:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml
index 159e3b130379d..049bb83d682c2 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -117,7 +117,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
roots:
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml
index f435b3daa7bb4..a0ab97003b76b 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -107,7 +107,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
- cannot_run_during_upgrade_downgrade
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml
index bede1bc20109c..d5e5f7ff61c7d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -107,7 +107,6 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
- cannot_run_during_upgrade_downgrade
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml
index f9675fc9510e6..a2d49f3c5579d 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml
@@ -32,6 +32,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml
index fd2562ac8b6c6..de194b1018bce 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml
@@ -32,6 +32,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml
index 2887689392f34..844c85ce63074 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -73,6 +73,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
@@ -115,6 +116,7 @@ selector:
- jstests/core/**/geo_s2sparse.js
- jstests/core/**/mixed_version_replica_set.js
- jstests/core/timeseries/timeseries_merge.js
+ - jstests/core/**/command_let_variables.js
exclude_with_any_tags:
- assumes_against_mongod_not_mongos
- assumes_standalone_mongod
@@ -132,13 +134,10 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
- cannot_run_during_upgrade_downgrade
- requires_timeseries
roots:
- jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml
index 5091d0f011dca..7251d5511a217 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml
@@ -15,8 +15,8 @@ executor:
tests: true
config:
shell_options:
- eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
+ eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js');
+ globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
global_vars:
@@ -73,6 +73,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
@@ -115,6 +116,7 @@ selector:
- jstests/core/**/geo_s2sparse.js
- jstests/core/**/mixed_version_replica_set.js
- jstests/core/timeseries/timeseries_merge.js
+ - jstests/core/**/command_let_variables.js
exclude_with_any_tags:
- assumes_against_mongod_not_mongos
- assumes_standalone_mongod
@@ -132,13 +134,10 @@ selector:
- requires_fastcount
- requires_dbstats
- requires_collstats
- - requires_datasize
- operations_longer_than_stepdown_interval
- uses_parallel_shell
- cannot_run_during_upgrade_downgrade
- requires_timeseries
roots:
- jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
test_kind: js_test
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml
index 5d92678604505..fe4e27b8b54e5 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml
@@ -23,8 +23,6 @@ executor:
keyFile: jstests/libs/authTestsKey
keyFileData: Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
roleGraphInvalidationIsFatal: true
- setParameters:
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
nodb: ''
matrix_suite: true
selector:
@@ -47,6 +45,7 @@ selector:
- jstests/sharding/movechunk_parallel.js
- jstests/sharding/migration_critical_section_concurrency.js
- jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+ - jstests/sharding/cluster_time_across_add_shard.js
- jstests/sharding/set_user_write_block_mode.js
roots:
- jstests/sharding/**/*.js
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml
index d24bdf66855a4..f0335672780fd 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml
@@ -31,6 +31,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml
index 7a81e66c9471b..aeb4db92d4fba 100644
--- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml
@@ -31,6 +31,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
matrix_suite: true
diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml
similarity index 100%
rename from buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml
rename to buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml
diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml
similarity index 100%
rename from buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml
rename to buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml
diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml
new file mode 100644
index 0000000000000..8e38541e94515
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml
@@ -0,0 +1,3 @@
+base_suite: generational_fuzzer
+overrides:
+ - "multiversion.replica_sets_multiversion_testdata_last_continuous"
diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml
new file mode 100644
index 0000000000000..d3863bfd6afbe
--- /dev/null
+++ b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml
@@ -0,0 +1,3 @@
+base_suite: generational_fuzzer
+overrides:
+ - "multiversion.replica_sets_multiversion_testdata_last_lts"
diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml
index f04c78aaef68b..074d8e47f2773 100644
--- a/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml
@@ -7,5 +7,3 @@
global_vars:
TestData:
auditDestination: 'console'
- setParameters:
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml
index 4b06f4d4a66a4..87316cda12dfd 100644
--- a/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml
@@ -7,7 +7,7 @@
# We do not always want all of the eval statements from the base suite so we override
# the ones we always want
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
- name: causal_consistency
@@ -22,7 +22,7 @@
# this is not under the eval section on purpose, we want to override this
# to get rid of causal consistency
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
- name: mongos_passthrough
diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml
index b632927e91e37..0bab07b64d532 100644
--- a/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml
@@ -9,9 +9,9 @@
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml
index 11956227a03e0..a3c3d501b1718 100644
--- a/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml
@@ -80,9 +80,6 @@
# Exclude last-lts specific tests
- jstests/multiVersion/targetedTestsLastLtsFeatures/*.js
- # TODO: SERVER-21578
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
-
# TODO: SERVER-28104
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
@@ -100,9 +97,6 @@
# Exclude last-continuous specific tests
- jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js
- # TODO: SERVER-21578
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
-
# TODO: SERVER-28104
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml
index cca7e08ed840a..159658337cea8 100644
--- a/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml
+++ b/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml
@@ -178,8 +178,6 @@
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
## The next tag corresponds to long running-operations, as they may exhaust their number
# of retries and result in a network error being thrown.
- operations_longer_than_stepdown_interval
diff --git a/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml b/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml
index c5cceef6749c1..75946b4a634c2 100644
--- a/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml
+++ b/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml
@@ -28,7 +28,7 @@ tasks:
crash_method: kill
- name: powercycle_last_lts_fcv
- fcv: "6.0"
+ fcv: "7.0"
- name: powercycle_replication
repl_set: powercycle
diff --git a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
index 770cc05268551..53b4b1044277d 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml
@@ -34,6 +34,7 @@ executor:
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
index eeb0e097e2f04..697fe52940459 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml
@@ -36,6 +36,7 @@ executor:
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
index e0b510c5da9de..d3af2a9dc0ee2 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml
@@ -37,7 +37,7 @@ executor:
defaultReadConcernLevel: majority
enableMajorityReadConcern: ''
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
diff --git a/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml b/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml
index 150de7ad3075b..2b9843776115c 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml
@@ -44,7 +44,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_causal_consistency.js');
load('jstests/libs/override_methods/detect_spawning_own_mongod.js');
hooks:
diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml
index dbca71df6de3c..d62c66e4c061f 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml
@@ -58,7 +58,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_causal_consistency.js');
load('jstests/libs/override_methods/detect_spawning_own_mongod.js');
load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
@@ -69,6 +69,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
index 384f812a21b50..c60be3bb2b68c 100644
--- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml
@@ -38,6 +38,7 @@ executor:
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml
index 48283fb067b66..893539d06e8d8 100644
--- a/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml
@@ -103,6 +103,9 @@ selector:
# The following test requires the collection to be unsharded.
- jstests/core/txns/finished_transaction_error_handling.js
+ # The following tests fail because configureQueryAnalyzer is not allowed on QE collections
+ - jstests/core/queryable_encryption/**/*.js
+
exclude_with_any_tags:
- assumes_against_mongod_not_mongos
- assumes_standalone_mongod
@@ -142,6 +145,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
@@ -158,6 +162,8 @@ executor:
queryAnalysisSamplerConfigurationRefreshSecs: 1
queryAnalysisWriterIntervalSecs: 5
analyzeShardKeyNumRanges: 10
+ analyzeShardKeySplitPointExpirationSecs: 10
+ ttlMonitorSleepSecs: 5
logComponentVerbosity:
verbosity: 0
sharding: 2
diff --git a/buildscripts/resmokeconfig/suites/benchmarks.yml b/buildscripts/resmokeconfig/suites/benchmarks.yml
index 88a8a3a48899e..594a928b1bf5b 100644
--- a/buildscripts/resmokeconfig/suites/benchmarks.yml
+++ b/buildscripts/resmokeconfig/suites/benchmarks.yml
@@ -18,6 +18,9 @@ selector:
- build/install/bin/simple8b_bm*
# Hash table benchmark is really slow, don't run on evergreen
- build/install/bin/hash_table_bm*
+ # These benchmarks are being run as part of the benchmarks_query.yml
+ - build/install/bin/percentile_algo_bm*
+ - build/install/bin/window_function_percentile_bm*
# These benchmarks are being run as part of the benchmarks_expression*.yml
- build/install/bin/expression_bm*
- build/install/bin/sbe_expression_bm*
@@ -26,7 +29,8 @@ selector:
# These benchmarks are being run as part of the benchmarks_streams.yml test suite.
- build/install/bin/streams_operator_dag_bm*
# These benchmarks are only run when modifying or upgrading the immutable library.
- - build/install/bin/absl_comparison_bm*
+ - build/install/bin/immutable_absl_comparison_bm*
+ - build/install/bin/immutable_std_comparison_bm*
# These benchmarks are being run as part of the benchmarks_replication.yml test suite.
- build/install/bin/oplog_application_bm*
diff --git a/buildscripts/resmokeconfig/suites/benchmarks_query.yml b/buildscripts/resmokeconfig/suites/benchmarks_query.yml
new file mode 100644
index 0000000000000..d8e1fd3f93f62
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/benchmarks_query.yml
@@ -0,0 +1,14 @@
+# Query-related Google micro-benchmarks, unless they are run in separate dedicated suites.
+test_kind: benchmark_test
+
+selector:
+ root: build/benchmarks.txt
+ include_files:
+ # The trailing asterisk is for handling the .exe extension on Windows.
+ - build/install/bin/percentile_algo_bm*
+ - build/install/bin/window_function_percentile_bm*
+
+executor:
+ config: {}
+ hooks:
+ - class: CombineBenchmarkResults
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml
index 88052220fd0bd..fd4bf68c9bc6b 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml
@@ -73,6 +73,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
index c8a496bd521dc..4b75aad624de7 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml
@@ -74,6 +74,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
index 3caf7b719e3b2..0811ffc4ee49b 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml
@@ -143,19 +143,25 @@ executor:
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: CheckMetadataConsistencyInBackground
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: ValidateCollections
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
+ <<: *authOptions
+ - class: CheckOrphansDeleted
+ shell_options:
+ global_vars:
+ TestData: *TestData
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: CleanEveryN
n: 20
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml
index cc4f4ae090011..2a5d91d3fe60f 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml
@@ -28,7 +28,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml
index 9a9d6a9193ed7..88b77b83ed19d 100644
--- a/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml
@@ -41,7 +41,7 @@ executor:
TestData:
defaultReadConcernLevel: snapshot
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load("jstests/libs/override_methods/enable_causal_consistency.js");
hooks:
diff --git a/buildscripts/resmokeconfig/suites/change_streams.yml b/buildscripts/resmokeconfig/suites/change_streams.yml
index 1b8715b573226..1da3a4c9abcd9 100644
--- a/buildscripts/resmokeconfig/suites/change_streams.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams.yml
@@ -34,7 +34,7 @@ executor:
# are bound to the oplog visibility rules. Using causal consistency forces the visibility
# point to advance to the timestamp of the last write before doing a new read.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
hooks:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml b/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml
index 6075956ce23fb..74c575b9be77c 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml
@@ -329,8 +329,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
# "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..."
- uses_parallel_shell
@@ -363,9 +361,9 @@ executor:
# shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize and
# uses_parallel_shell tags to denylist tests that uses them unsafely.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -404,6 +402,7 @@ executor:
global_vars:
TestData:
checkCollectionCounts: true
+ - class: CheckOrphansDeleted
fixture:
class: ShardedClusterFixture
mongos_options:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml
index 1d9f280832169..ed30992c76684 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml
@@ -35,13 +35,14 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml
index 1626b64271984..4d07e1f3a8d89 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml
@@ -32,7 +32,7 @@ executor:
wrapCRUDinTransactions: true
# Enable the transactions passthrough.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
load('jstests/libs/override_methods/network_error_and_txn_override.js');
@@ -43,6 +43,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml
index db5ffedc85097..4569f7045f6da 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml
@@ -30,7 +30,7 @@ executor:
wrapCRUDinTransactions: true
# Enable the transactions passthrough.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
load('jstests/libs/override_methods/network_error_and_txn_override.js');
diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml
index 17dea97fd4286..6ad5be5bc6330 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml
@@ -33,7 +33,7 @@ executor:
wrapCRUDinTransactions: true
# Enable the transactions passthrough.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
load('jstests/libs/override_methods/network_error_and_txn_override.js');
@@ -44,6 +44,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml
index 17a29d4f9cfb3..68cbea7cea7b8 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml
@@ -7,8 +7,6 @@ selector:
##
# TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless.
- jstests/change_streams/projection_fakes_internal_event.js
- # TODO SERVER-69959: Implement a majority-committed insert listener.
- - jstests/change_streams/only_wake_getmore_for_relevant_changes.js
##
# TODO SERVER-70760: This test creates its own sharded cluster and uses transaction. The support
@@ -47,6 +45,10 @@ selector:
# This test uses 'system' database and '$tenant' cannot be injected in 'system.$cmd' namespace.
- jstests/change_streams/global_index.js
+ # The queryable encryption test requires an internal connection for the keyvault that does not
+ # inject a $tenant.
+ - jstests/change_streams/queryable_encryption_change_stream.js
+
exclude_with_any_tags:
##
# The next tags correspond to the special errors thrown by the
@@ -72,7 +74,7 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
load('jstests/libs/override_methods/inject_dollar_tenant.js');
diff --git a/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml
index 73adfbc19b4e0..b7c3c0ced95ea 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml
@@ -4,8 +4,8 @@ selector:
roots:
- jstests/change_streams/**/*.js
exclude_files:
- # TODO SERVER-69959: Implement a majority-committed insert listener.
- - jstests/change_streams/only_wake_getmore_for_relevant_changes.js
+ # TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless.
+ - jstests/change_streams/**/*.js
# TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless.
- jstests/change_streams/projection_fakes_internal_event.js
# TODO SERVER-68557 This test list databases that does not work in the sharded-cluster. This test
@@ -43,7 +43,7 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
@@ -52,6 +52,7 @@ executor:
- class: EnableChangeStream
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml
index 09d056f1e5423..7790edeabfa4f 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml
@@ -33,7 +33,7 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
@@ -41,6 +41,7 @@ executor:
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml
deleted file mode 100644
index f94799973e24c..0000000000000
--- a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-test_kind: js_test
-selector:
- roots:
- - jstests/change_streams/**/*.js
- exclude_files:
- # This test explicitly compares v1 and v2 tokens, and must be able to generate the former.
- - jstests/change_streams/generate_v1_resume_token.js
-
- # The following tests run in a sharded fixture where the mongod generates a new shard detected
- # internal event, which needs to be swallowed by the mongos. This is not supported here, because
- # this suite will return the event op name as 'kNewShardDetected', but the mongos expects the
- # event op name to be 'migrateChunkToNewShard'.
- - jstests/change_streams/create_event_from_chunk_migration.js
- - jstests/change_streams/migrate_last_chunk_from_shard_event.js
- - jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
- - jstests/change_streams/projection_fakes_internal_event.js
- # The following test uses the '$changeStreamSplitLargeEvents' stage which requires v2 token.
- - jstests/change_streams/split_large_event.js
-
- exclude_with_any_tags:
- ##
- # The next tags correspond to the special errors thrown by the
- # set_read_and_write_concerns.js override when it refuses to replace the readConcern or
- # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be
- # warranted.
- ##
- # "Cowardly refusing to override write concern of command: ..."
- - assumes_write_concern_unchanged
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - CheckReplOplogs
- - ValidateCollections
- config:
- shell_options:
- global_vars:
- TestData:
- defaultReadConcernLevel: null
- enableMajorityReadConcern: ''
- # Enable causal consistency for change streams suites using 1 node replica sets. Some tests
- # rely on the assumption that a w:majority write will be visible immediately in a subsequently
- # opened change stream. In 1 node replica sets, an operation that majority commits at
- # timestamp T will force the majority snapshot to advance to T, but the oplog visibility point
- # may not have advanced to T yet. Subsequent majority snapshot reads will see this write in
- # the oplog, but speculative majority reads may not, since they read from a local snapshot and
- # are bound to the oplog visibility rules. Using causal consistency forces the visibility
- # point to advance to the timestamp of the last write before doing a new read.
- eval: >-
- var testingReplication = true;
- load('jstests/libs/override_methods/set_read_and_write_concerns.js');
- load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
- load('jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js');
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplOplogs
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- bind_ip_all: ''
- set_parameters:
- enableTestCommands: 1
- num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml
deleted file mode 100644
index d91b200e1fb2a..0000000000000
--- a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-test_kind: js_test
-selector:
- roots:
- - jstests/change_streams/**/*.js
- exclude_files:
- # This test explicitly compares v1 and v2 tokens, and must be able to generate the former.
- - jstests/change_streams/generate_v1_resume_token.js
- # This test uses the '$changeStreamSplitLargeEvents' stage which requires v2 token.
- - jstests/change_streams/split_large_event.js
-
- exclude_with_any_tags:
- ##
- # The next tags correspond to the special errors thrown by the
- # set_read_and_write_concerns.js override when it refuses to replace the readConcern or
- # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be
- # warranted.
- ##
- # "Cowardly refusing to override write concern of command: ..."
- - assumes_write_concern_unchanged
- # Exclude any that assume sharding is disabled
- - assumes_against_mongod_not_mongos
- - assumes_unsharded_collection
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - ValidateCollections
- config:
- shell_options:
- global_vars:
- TestData:
- defaultReadConcernLevel: null
- enableMajorityReadConcern: ''
- # Enable causal consistency for change streams suites using 1 node replica sets. See
- # change_streams.yml for detailed explanation.
- eval: >-
- var testingReplication = true;
- load('jstests/libs/override_methods/set_read_and_write_concerns.js');
- load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
- load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
- load('jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js');
- hooks:
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- mongos_options:
- bind_ip_all: ''
- set_parameters:
- enableTestCommands: 1
- mongod_options:
- bind_ip_all: ''
- set_parameters:
- enableTestCommands: 1
- writePeriodicNoops: 1
- periodicNoopIntervalSecs: 1
- coordinateCommitReturnImmediatelyAfterPersistingDecision: true
- num_shards: 2
- enable_sharding:
- - test
diff --git a/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml
index 1a5763bf5de80..f9718ae9ad638 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml
@@ -30,7 +30,7 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
diff --git a/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml
index 068fb6293d2ee..439d88f00e412 100644
--- a/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml
@@ -31,7 +31,7 @@ executor:
# Enable causal consistency for change streams suites using 1 node replica sets. See
# change_streams.yml for detailed explanation.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
diff --git a/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml b/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml
index 2bbfaf7866e97..b01916e2fc9bc 100644
--- a/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml
@@ -60,9 +60,11 @@ selector:
- jstests/core/**/index_bounds_pipe.js
# Expects an index on _id to cover the query.
- jstests/core/**/covered_index_simple_id.js
- # TODO (SERVER-61259): $text not supported: "No query solutions"
+ # TODO (SERVER-78045): $text not supported: "No query solutions"
- jstests/core/**/fts6.js
- jstests/core/**/fts_projection.js
+ # Assumes there is one collection that is not clustered.
+ - jstests/core/find_with_resume_after_param.js
exclude_with_any_tags:
- assumes_standalone_mongod
@@ -82,13 +84,11 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mongod_options:
set_parameters:
enableTestCommands: 1
- # SBE is not compatible with clustered collections
- internalQueryFrameworkControl: "forceClassicEngine"
failpoint.clusterAllCollectionsByDefault: "{mode: 'alwaysOn'}"
num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
index 0b0d67e706f0e..78076c90ab1db 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml
@@ -3,6 +3,7 @@ test_kind: fsm_workload_test
selector:
roots:
- jstests/concurrency/fsm_workloads/**/*.js
+ - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
exclude_files:
##
# Disabled due to MongoDB restrictions and/or workload restrictions
@@ -25,7 +26,6 @@ selector:
# collStats is not causally consistent
- requires_collstats
- requires_dbstats
- - requires_datasize
- requires_sharding
# Tests which use $currentOp. Running an aggregation with $currentOp and read preference
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
index 71fcdccb0cc8a..7371f582447c4 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml
@@ -25,7 +25,6 @@ selector:
# collStats is not causally consistent
- requires_collstats
- requires_dbstats
- - requires_datasize
- requires_sharding
# Tests which use $currentOp. Running an aggregation with $currentOp and read preference
# secondary doesn't make much sense, since there's no guarantee *which* secondary you get results
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml
index 5a67527cecb0e..52e711d7e31a3 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml
@@ -53,6 +53,8 @@ executor:
oplogSize: 1024
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
# Setting this parameter to "1" disables cursor caching in WiredTiger, and sets the cache
# size to "1" in MongoDB. This forces all resources to be released when done.
wiredTigerCursorCacheSize: 1
diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml
deleted file mode 100644
index c8c732b6990c8..0000000000000
--- a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-test_kind: fsm_workload_test
-
-selector:
- roots:
- - jstests/concurrency/fsm_workloads/**/*.js
- exclude_files:
- ##
- # Disabled due to MongoDB restrictions and/or workload restrictions
- ##
- # These workloads use >100MB of data, which can overwhelm test hosts.
- - jstests/concurrency/fsm_workloads/agg_group_external.js
- - jstests/concurrency/fsm_workloads/agg_sort_external.js
-
- # The findAndModify_update_grow.js workload can cause OOM kills on test hosts.
- - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
-
- # These workloads run the reIndex command, which is only allowed on a standalone node.
- - jstests/concurrency/fsm_workloads/reindex.js
- - jstests/concurrency/fsm_workloads/reindex_background.js
- - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
-
- exclude_with_any_tags:
- - requires_sharding
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - ValidateCollectionsInBackground
- - CheckReplDBHash
- - ValidateCollections
- tests: true
- config: {}
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: ValidateCollectionsInBackground
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanupConcurrencyWorkloads
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- oplogSize: 1024
- set_parameters:
- enableTestCommands: 1
- # Enable aggressive WiredTiger eviction.
- wiredTigerEvictionDebugMode: true
- roleGraphInvalidationIsFatal: 1
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
index 22a2a92c40cab..b85581b395d7a 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml
@@ -109,12 +109,12 @@ executor:
runningWithCausalConsistency: true
runningWithBalancer: false
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -131,7 +131,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
index e7c35cbf0d3df..08cae19b0aa54 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
@@ -86,9 +86,15 @@ selector:
# Time-series collections are not supported on mongos.
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
- # Performs finds with $where, which are slow. Because 'orphanCleanupDelaySecs' is set to 1 second,
- # this may cause the finds to return incomplete results.
+ # Because 'orphanCleanupDelaySecs' is set to 1 second, the finds performed by these workloads may
+ # return incomplete results.
+ # TODO SERVER-77354: Allow the following tests to run in this suite after 'orphanCleanupDelaySecs'
+ # is increased.
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
+ - jstests/concurrency/fsm_workloads/agg_sort.js
+
+ # TODO Undenylist (SERVER-71819).
+ - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
exclude_with_any_tags:
- does_not_support_causal_consistency
@@ -119,12 +125,12 @@ executor:
runningWithCausalConsistency: true
runningWithBalancer: true
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -140,7 +146,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
index 7a7c7f24b7d8d..afee7a780574d 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml
@@ -20,12 +20,12 @@ executor:
tests: true
config: {}
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -41,7 +41,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml
index 972af5b6e69ec..f5c72ea9cff88 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml
@@ -82,6 +82,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -124,6 +125,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
- jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
- jstests/concurrency/fsm_workloads/reindex.js
- jstests/concurrency/fsm_workloads/reindex_background.js
@@ -158,6 +160,7 @@ selector:
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
- jstests/concurrency/fsm_workloads/collmod_writeconflict.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/invalidated_cursors.js
- jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
- jstests/concurrency/fsm_workloads/view_catalog.js
@@ -217,7 +220,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load("jstests/libs/override_methods/set_read_preference_secondary.js");
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
global_vars:
@@ -233,11 +236,11 @@ executor:
- class: ContinuousInitialSync
use_action_permitted_file: true
sync_interval_secs: 15
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: CheckClusterIndexConsistency
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
shell_options:
@@ -274,7 +277,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
collectionClonerBatchSize: 10
initialSyncOplogFetcherBatchSize: 10
queryAnalysisWriterIntervalSecs: 1
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
index 5e8f0258da127..13f5b7ebf1045 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml
@@ -111,6 +111,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
- jstests/concurrency/fsm_workloads/reindex.js
- jstests/concurrency/fsm_workloads/reindex_background.js
- jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -139,6 +140,7 @@ selector:
- jstests/concurrency/fsm_workloads/collmod.js
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/invalidated_cursors.js
- jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
- jstests/concurrency/fsm_workloads/view_catalog.js
@@ -198,11 +200,11 @@ executor:
shard_stepdown: true
use_action_permitted_file: true
kill: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
shell_options:
global_vars:
@@ -237,7 +239,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
index 0def4bb83c0d5..9cf3a2eae31a1 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml
@@ -81,6 +81,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -167,12 +168,12 @@ executor:
traceExceptions: false
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -189,7 +190,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
index 9010a3b3b2b99..da4f9be7d055a 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml
@@ -81,6 +81,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -166,12 +167,12 @@ executor:
traceExceptions: false
runningWithBalancer: true
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -187,7 +188,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
index c43514566b771..d5eaa754d3deb 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml
@@ -81,6 +81,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -157,12 +158,12 @@ executor:
runningWithSessions: true
traceExceptions: false
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -179,7 +180,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
index 63cdc532b5d27..ac5a47e830ad1 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml
@@ -97,6 +97,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -153,6 +154,7 @@ selector:
# Uses non retryable commands.
- jstests/concurrency/fsm_workloads/agg_out.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/agg_sort.js
- jstests/concurrency/fsm_workloads/collmod.js
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -179,6 +181,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
- jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
# Uses non-retryable commands in the same state function as a command not supported in a
# transaction.
@@ -248,11 +251,11 @@ executor:
shard_stepdown: true
stepdown_interval_ms: 15000
use_action_permitted_file: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
shell_options:
global_vars:
@@ -288,7 +291,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
index 73269fdedba1d..1f42d70170afa 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml
@@ -97,6 +97,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -153,6 +154,7 @@ selector:
# Uses non retryable commands.
- jstests/concurrency/fsm_workloads/agg_out.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/agg_sort.js
- jstests/concurrency/fsm_workloads/collmod.js
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -179,6 +181,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
- jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
# Uses non-retryable commands in the same state function as a command not supported in a
# transaction.
@@ -248,11 +251,11 @@ executor:
stepdown_interval_ms: 15000
terminate: true
use_action_permitted_file: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -283,7 +286,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
index 27fe0fb8a3a92..90756a7333006 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml
@@ -81,6 +81,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -161,12 +162,12 @@ executor:
traceExceptions: false
runningWithBalancer: true
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -182,7 +183,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
index af46e1e0b76a4..6bb88b3f35fea 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml
@@ -83,6 +83,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -135,6 +136,7 @@ selector:
# Uses non retryable commands.
- jstests/concurrency/fsm_workloads/agg_out.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/agg_sort.js
- jstests/concurrency/fsm_workloads/collmod.js
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -220,11 +222,11 @@ executor:
config_stepdown: true
shard_stepdown: true
use_action_permitted_file: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -255,7 +257,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
index b629912ccb8e1..a3e8153c625dd 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml
@@ -74,6 +74,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -92,6 +93,9 @@ selector:
# for dbCheck. TODO (SERVER-63951): Remove this exclusion.
- jstests/concurrency/fsm_workloads/create_collection_and_view.js
+ # The test may spuriously fail when run against sharded clusters, due to limitations of the
+ # infrastructure. See SERVER-77039 for full details.
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
exclude_with_any_tags:
- requires_replication
@@ -116,6 +120,7 @@ executor:
TestData:
runningWithBalancer: false
hooks:
+ - class: CheckShardFilteringMetadata
# TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back.
# - class: RunDBCheckInBackground
- class: CheckReplDBHashInBackground
@@ -123,7 +128,6 @@ executor:
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -140,7 +144,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
index 29eeda51de542..439067088ec05 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml
@@ -80,6 +80,7 @@ selector:
# TODO Undenylist (SERVER-38852).
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
# serverStatus does not include transaction metrics on mongos.
- jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -94,6 +95,13 @@ selector:
# Time-series collections are not supported on mongos.
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+ # The test may spuriously fail when run against sharded clusters, due to limitations of the
+ # infrastructure. See SERVER-77039 for full details.
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+
+ # TODO Undenylist (SERVER-71819).
+ - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
+
exclude_with_any_tags:
- assumes_balancer_off
- requires_replication
@@ -115,12 +123,12 @@ executor:
TestData:
runningWithBalancer: true
hooks:
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHashInBackground
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -136,7 +144,6 @@ executor:
set_parameters:
enableTestCommands: 1
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
index a239ce66d5534..3e02ef81ec9ab 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml
@@ -111,6 +111,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
- jstests/concurrency/fsm_workloads/reindex.js
- jstests/concurrency/fsm_workloads/reindex_background.js
- jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -139,6 +140,7 @@ selector:
- jstests/concurrency/fsm_workloads/collmod.js
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/invalidated_cursors.js
- jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
- jstests/concurrency/fsm_workloads/view_catalog.js
@@ -198,11 +200,11 @@ executor:
shard_stepdown: true
use_action_permitted_file: true
terminate: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -232,7 +234,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml
deleted file mode 100644
index 6a867b9629a9f..0000000000000
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml
+++ /dev/null
@@ -1,148 +0,0 @@
-test_kind: fsm_workload_test
-
-selector:
- roots:
- - jstests/concurrency/fsm_workloads/**/*.js
- - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
- exclude_files:
- # SERVER-13116 distinct isn't sharding aware
- - jstests/concurrency/fsm_workloads/distinct.js
- - jstests/concurrency/fsm_workloads/distinct_noindex.js
- - jstests/concurrency/fsm_workloads/distinct_projection.js
-
- # SERVER-14669 Multi-removes that use $where miscount removed documents
- - jstests/concurrency/fsm_workloads/remove_where.js
-
- # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- # collections'. This bug is problematic for these workloads because they assert on count()
- # values:
- - jstests/concurrency/fsm_workloads/agg_match.js
-
- # Disabled due to MongoDB restrictions and/or workload restrictions
-
- # These workloads sometimes trigger 'Could not lock auth data update lock'
- # errors because the AuthorizationManager currently waits for only five
- # seconds to acquire the lock for authorization documents
- - jstests/concurrency/fsm_workloads/auth_create_role.js
- - jstests/concurrency/fsm_workloads/auth_create_user.js
- - jstests/concurrency/fsm_workloads/auth_drop_role.js
- - jstests/concurrency/fsm_workloads/auth_drop_user.js
-
- # uses >100MB of data, which can overwhelm test hosts
- - jstests/concurrency/fsm_workloads/agg_group_external.js
- - jstests/concurrency/fsm_workloads/agg_sort_external.js
-
- # compact can only be run against a standalone mongod
- - jstests/concurrency/fsm_workloads/compact.js
- - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
-
- # convertToCapped can't be run on mongos processes
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
-
- # findAndModify requires a shard key
- - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
- - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
-
- # remove cannot be {} for findAndModify
- - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
-
- # can cause OOM kills on test hosts
- - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
-
-
- # cannot createIndex after dropDatabase without sharding first
- - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
-
- # reIndex is not supported in mongos.
- - jstests/concurrency/fsm_workloads/reindex.js
- - jstests/concurrency/fsm_workloads/reindex_background.js
- - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
-
- # The WTWriteConflictException failpoint is not supported on mongos.
- - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
-
- # our .remove(query, {justOne: true}) calls lack shard keys
- - jstests/concurrency/fsm_workloads/remove_single_document.js
-
- # SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection
- - jstests/concurrency/fsm_workloads/update_where.js
-
- # cannot use upsert command with $where with sharded collections
- - jstests/concurrency/fsm_workloads/upsert_where.js
-
- # stagedebug can only be run against a standalone mongod
- - jstests/concurrency/fsm_workloads/yield_and_hashed.js
- - jstests/concurrency/fsm_workloads/yield_and_sorted.js
-
- # TODO Undenylist (SERVER-38852).
- - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
-
- # serverStatus does not include transaction metrics on mongos.
- - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
-
- # Uses the same transaction id across different routers, which is not allowed because when either
- # router tries to commit, it may not know the full participant list.
- - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
-
- # Inserts directly into system.views using applyOps, which is not available on mongos.
- - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
-
- # Time-series collections are not supported on mongos.
- - jstests/concurrency/fsm_workloads/create_timeseries_collection.js
-
- exclude_with_any_tags:
- - assumes_balancer_off
- - requires_replication
- # mongos has no system.profile collection.
- - requires_profiling
- - assumes_unsharded_collection
- # TODO SERVER-73279: Remove after branching 7.0 and a catalog shard can be downgraded.
- - multiversion_incompatible
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- # TODO (SERVER-74534): Enable when this will work with co-located configsvr.
- # - CheckMetadataConsistencyInBackground
- - ValidateCollections
- tests: true
- config:
- shell_options:
- global_vars:
- TestData:
- runningWithBalancer: true
- hooks:
- - class: CheckReplDBHashInBackground
- - class: CheckReplDBHash
- # TODO (SERVER-74534): Enable when this will work with co-located configsvr.
- # - class: CheckMetadataConsistencyInBackground
- - class: CheckOrphansDeleted
- - class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- - class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- - class: CleanupConcurrencyWorkloads
- fixture:
- class: ShardedClusterFixture
- catalog_shard: "any"
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- queryAnalysisSamplerConfigurationRefreshSecs: 1
- shard_options:
- mongod_options:
- oplogSize: 1024
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
- queryAnalysisWriterIntervalSecs: 1
- num_rs_nodes_per_shard: 3
- num_shards: 2
- num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml
new file mode 100644
index 0000000000000..2456f25d9b2a6
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml
@@ -0,0 +1,147 @@
+test_kind: fsm_workload_test
+
+selector:
+ roots:
+ - jstests/concurrency/fsm_workloads/**/*.js
+ - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # SERVER-14669 Multi-removes that use $where miscount removed documents
+ - jstests/concurrency/fsm_workloads/remove_where.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+
+ # cannot createIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # reIndex is not supported in mongos.
+ - jstests/concurrency/fsm_workloads/reindex.js
+ - jstests/concurrency/fsm_workloads/reindex_background.js
+ - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
+
+ # The WTWriteConflictException failpoint is not supported on mongos.
+ - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection
+ - jstests/concurrency/fsm_workloads/update_where.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
+
+ # TODO Undenylist (SERVER-38852).
+ - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
+
+ # serverStatus does not include transaction metrics on mongos.
+ - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
+
+ # Uses the same transaction id across different routers, which is not allowed because when either
+ # router tries to commit, it may not know the full participant list.
+ - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
+
+ # Inserts directly into system.views using applyOps, which is not available on mongos.
+ - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
+
+ # Time-series collections are not supported on mongos.
+ - jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+
+ # TODO Undenylist (SERVER-71819).
+ - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
+
+ exclude_with_any_tags:
+ - assumes_balancer_off
+ - requires_replication
+ # mongos has no system.profile collection.
+ - requires_profiling
+ - assumes_unsharded_collection
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHashInBackground
+ - CheckReplDBHash
+ - CheckMetadataConsistencyInBackground
+ - ValidateCollections
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ runningWithBalancer: true
+ hooks:
+ - class: CheckShardFilteringMetadata
+ - class: CheckReplDBHashInBackground
+ - class: CheckReplDBHash
+ - class: CheckMetadataConsistencyInBackground
+ - class: CheckOrphansDeleted
+ - class: CheckRoutingTableConsistency
+ - class: ValidateCollections # Validation can interfere with other operations, so this goes last.
+ - class: CleanupConcurrencyWorkloads
+ fixture:
+ class: ShardedClusterFixture
+ config_shard: "any"
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ roleGraphInvalidationIsFatal: 1
+ queryAnalysisWriterIntervalSecs: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml
deleted file mode 100644
index d087bd29819ad..0000000000000
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml
+++ /dev/null
@@ -1,152 +0,0 @@
-test_kind: fsm_workload_test
-
-selector:
- roots:
- - jstests/concurrency/fsm_workloads/**/*.js
- - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
- exclude_files:
- # SERVER-13116 distinct isn't sharding aware
- - jstests/concurrency/fsm_workloads/distinct.js
- - jstests/concurrency/fsm_workloads/distinct_noindex.js
- - jstests/concurrency/fsm_workloads/distinct_projection.js
-
- # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
- # collections'. This bug is problematic for these workloads because they assert on count()
- # values:
- - jstests/concurrency/fsm_workloads/agg_match.js
-
- # Disabled due to MongoDB restrictions and/or workload restrictions
-
- # These workloads sometimes trigger 'Could not lock auth data update lock'
- # errors because the AuthorizationManager currently waits for only five
- # seconds to acquire the lock for authorization documents
- - jstests/concurrency/fsm_workloads/auth_create_role.js
- - jstests/concurrency/fsm_workloads/auth_create_user.js
- - jstests/concurrency/fsm_workloads/auth_drop_role.js
- - jstests/concurrency/fsm_workloads/auth_drop_user.js
-
- # uses >100MB of data, which can overwhelm test hosts
- - jstests/concurrency/fsm_workloads/agg_group_external.js
- - jstests/concurrency/fsm_workloads/agg_sort_external.js
-
- # compact can only be run against a standalone mongod
- - jstests/concurrency/fsm_workloads/compact.js
- - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
-
- # convertToCapped can't be run on mongos processes
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
-
- # findAndModify requires a shard key
- - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
- - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
- - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
-
- # remove cannot be {} for findAndModify
- - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
-
- # can cause OOM kills on test hosts
- - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
-
-
- # cannot createIndex after dropDatabase without sharding first
- - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
-
- # reIndex is not supported in mongos.
- - jstests/concurrency/fsm_workloads/reindex.js
- - jstests/concurrency/fsm_workloads/reindex_background.js
- - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
-
- # The WTWriteConflictException failpoint is not supported on mongos.
- - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
-
- # our .remove(query, {justOne: true}) calls lack shard keys
- - jstests/concurrency/fsm_workloads/remove_single_document.js
-
- # cannot use upsert command with $where with sharded collections
- - jstests/concurrency/fsm_workloads/upsert_where.js
-
- # stagedebug can only be run against a standalone mongod
- - jstests/concurrency/fsm_workloads/yield_and_hashed.js
- - jstests/concurrency/fsm_workloads/yield_and_sorted.js
-
- # TODO Undenylist (SERVER-38852).
- - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
-
- # serverStatus does not include transaction metrics on mongos.
- - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
-
- # Uses the same transaction id across different routers, which is not allowed because when either
- # router tries to commit, it may not know the full participant list.
- - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
-
- # Inserts directly into system.views using applyOps, which is not available on mongos.
- - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
-
- # Time-series collections are not supported on mongos.
- - jstests/concurrency/fsm_workloads/create_timeseries_collection.js
-
- # This test concurrently creates views and collections on the same namespace, which causes issues
- # for dbCheck. TODO (SERVER-63951): Remove this exclusion.
- - jstests/concurrency/fsm_workloads/create_collection_and_view.js
-
-
- exclude_with_any_tags:
- - requires_replication
- - assumes_balancer_on
- # mongos has no system.profile collection.
- - requires_profiling
- - assumes_unsharded_collection
- # TODO SERVER-73279: Remove after branching 7.0 and a catalog shard can be downgraded.
- - multiversion_incompatible
-
-executor:
- archive:
- hooks:
- # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back.
- # - RunDBCheckInBackground
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- # TODO (SERVER-74534): Enable when this will work with co-located configsvr.
- # - CheckMetadataConsistencyInBackground
- - ValidateCollections
- tests: true
- config:
- shell_options:
- global_vars:
- TestData:
- runningWithBalancer: false
- hooks:
- # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back.
- # - class: RunDBCheckInBackground
- - class: CheckReplDBHashInBackground
- - class: CheckReplDBHash
- # TODO (SERVER-74534): Enable when this will work with co-located configsvr.
- # - class: CheckMetadataConsistencyInBackground
- - class: CheckOrphansDeleted
- - class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- - class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- - class: CleanupConcurrencyWorkloads
- fixture:
- class: ShardedClusterFixture
- catalog_shard: "any"
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- queryAnalysisSamplerConfigurationRefreshSecs: 1
- shard_options:
- mongod_options:
- oplogSize: 1024
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
- queryAnalysisWriterIntervalSecs: 1
- num_rs_nodes_per_shard: 3
- num_shards: 2
- num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml
new file mode 100644
index 0000000000000..fb8c1d97f0637
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml
@@ -0,0 +1,148 @@
+test_kind: fsm_workload_test
+
+selector:
+ roots:
+ - jstests/concurrency/fsm_workloads/**/*.js
+ - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
+ exclude_files:
+ # SERVER-13116 distinct isn't sharding aware
+ - jstests/concurrency/fsm_workloads/distinct.js
+ - jstests/concurrency/fsm_workloads/distinct_noindex.js
+ - jstests/concurrency/fsm_workloads/distinct_projection.js
+
+ # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded
+ # collections'. This bug is problematic for these workloads because they assert on count()
+ # values:
+ - jstests/concurrency/fsm_workloads/agg_match.js
+
+ # Disabled due to MongoDB restrictions and/or workload restrictions
+
+ # These workloads sometimes trigger 'Could not lock auth data update lock'
+ # errors because the AuthorizationManager currently waits for only five
+ # seconds to acquire the lock for authorization documents
+ - jstests/concurrency/fsm_workloads/auth_create_role.js
+ - jstests/concurrency/fsm_workloads/auth_create_user.js
+ - jstests/concurrency/fsm_workloads/auth_drop_role.js
+ - jstests/concurrency/fsm_workloads/auth_drop_user.js
+
+ # uses >100MB of data, which can overwhelm test hosts
+ - jstests/concurrency/fsm_workloads/agg_group_external.js
+ - jstests/concurrency/fsm_workloads/agg_sort_external.js
+
+ # compact can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/compact.js
+ - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
+
+ # convertToCapped can't be run on mongos processes
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+ - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+
+ # findAndModify requires a shard key
+ - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+ - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+
+ # remove cannot be {} for findAndModify
+ - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+
+ # can cause OOM kills on test hosts
+ - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+
+
+ # cannot createIndex after dropDatabase without sharding first
+ - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+
+ # reIndex is not supported in mongos.
+ - jstests/concurrency/fsm_workloads/reindex.js
+ - jstests/concurrency/fsm_workloads/reindex_background.js
+ - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
+
+ # The WTWriteConflictException failpoint is not supported on mongos.
+ - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
+
+ # our .remove(query, {justOne: true}) calls lack shard keys
+ - jstests/concurrency/fsm_workloads/remove_single_document.js
+
+ # cannot use upsert command with $where with sharded collections
+ - jstests/concurrency/fsm_workloads/upsert_where.js
+
+ # stagedebug can only be run against a standalone mongod
+ - jstests/concurrency/fsm_workloads/yield_and_hashed.js
+ - jstests/concurrency/fsm_workloads/yield_and_sorted.js
+
+ # TODO Undenylist (SERVER-38852).
+ - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
+
+ # serverStatus does not include transaction metrics on mongos.
+ - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
+
+ # Uses the same transaction id across different routers, which is not allowed because when either
+ # router tries to commit, it may not know the full participant list.
+ - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
+
+ # Inserts directly into system.views using applyOps, which is not available on mongos.
+ - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
+
+ # Time-series collections are not supported on mongos.
+ - jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+
+ # This test concurrently creates views and collections on the same namespace, which causes issues
+ # for dbCheck. TODO (SERVER-63951): Remove this exclusion.
+ - jstests/concurrency/fsm_workloads/create_collection_and_view.js
+
+
+ exclude_with_any_tags:
+ - requires_replication
+ - assumes_balancer_on
+ # mongos has no system.profile collection.
+ - requires_profiling
+ - assumes_unsharded_collection
+
+executor:
+ archive:
+ hooks:
+ # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back.
+ # - RunDBCheckInBackground
+ - CheckReplDBHashInBackground
+ - CheckReplDBHash
+ - CheckMetadataConsistencyInBackground
+ - ValidateCollections
+ tests: true
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ runningWithBalancer: false
+ hooks:
+ - class: CheckShardFilteringMetadata
+ # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back.
+ # - class: RunDBCheckInBackground
+ - class: CheckReplDBHashInBackground
+ - class: CheckReplDBHash
+ - class: CheckMetadataConsistencyInBackground
+ - class: CheckOrphansDeleted
+ - class: CheckRoutingTableConsistency
+ - class: ValidateCollections # Validation can interfere with other operations, so this goes last.
+ - class: CleanupConcurrencyWorkloads
+ fixture:
+ class: ShardedClusterFixture
+ config_shard: "any"
+ enable_balancer: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ shard_options:
+ mongod_options:
+ oplogSize: 1024
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ roleGraphInvalidationIsFatal: 1
+ queryAnalysisWriterIntervalSecs: 1
+ num_rs_nodes_per_shard: 3
+ num_shards: 2
+ num_mongos: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
index 060071fed1c24..6579a095d8654 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml
@@ -97,6 +97,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
- jstests/concurrency/fsm_workloads/reindex.js
- jstests/concurrency/fsm_workloads/reindex_background.js
- jstests/concurrency/fsm_workloads/reindex_writeconflict.js
@@ -128,6 +129,7 @@ selector:
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
- jstests/concurrency/fsm_workloads/collmod_writeconflict.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/invalidated_cursors.js
- jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
- jstests/concurrency/fsm_workloads/view_catalog.js
@@ -154,6 +156,10 @@ selector:
# Time-series collections are not supported on mongos.
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+ # The test may spuriously fail when run against sharded clusters, due to limitations of the
+ # infrastructure. See SERVER-77039 for full details.
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+
exclude_with_any_tags:
- requires_replication
- requires_non_retryable_writes
@@ -185,11 +191,11 @@ executor:
config_stepdown: true
shard_stepdown: true
use_action_permitted_file: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -220,7 +226,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
index 027380c37b08b..0d869984fc430 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml
@@ -103,6 +103,7 @@ selector:
- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
- jstests/concurrency/fsm_workloads/indexed_insert_where.js
- jstests/concurrency/fsm_workloads/list_indexes.js
+ - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
- jstests/concurrency/fsm_workloads/reindex.js
- jstests/concurrency/fsm_workloads/reindex_background.js
- jstests/concurrency/fsm_workloads/reindex_writeconflict.js
@@ -133,6 +134,7 @@ selector:
- jstests/concurrency/fsm_workloads/collmod_separate_collections.js
- jstests/concurrency/fsm_workloads/collmod_writeconflict.js
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+ - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
- jstests/concurrency/fsm_workloads/invalidated_cursors.js
- jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
- jstests/concurrency/fsm_workloads/view_catalog.js
@@ -159,6 +161,10 @@ selector:
# Time-series collections are not supported on mongos.
- jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+ # The test may spuriously fail when run against sharded clusters, due to limitations of the
+ # infrastructure. See SERVER-77039 for full details.
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js
+
exclude_with_any_tags:
- assumes_balancer_off
- requires_replication
@@ -190,11 +196,11 @@ executor:
config_stepdown: true
shard_stepdown: true
use_action_permitted_file: true
+ - class: CheckShardFilteringMetadata
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: CheckOrphansDeleted
- class: CheckRoutingTableConsistency
- - class: CheckShardFilteringMetadata
- class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads
fixture:
@@ -224,7 +230,6 @@ executor:
enableTestCommands: 1
enableElectionHandoff: 0
roleGraphInvalidationIsFatal: 1
- receiveChunkWaitForRangeDeleterTimeoutMS: 90000
queryAnalysisWriterIntervalSecs: 1
num_rs_nodes_per_shard: 3
num_shards: 2
diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml
index 379505a99ea12..f5044cb0c3fbf 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml
@@ -69,6 +69,6 @@ executor:
# system running out of WiredTiger write tickets. We intentionally lower the number of
# WiredTiger write tickets available to below the maximum number of database clients to
# trigger this situation at least some of the time.
- storageEngineConcurrencyAdjustmentAlgorithm: ""
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
wiredTigerConcurrentWriteTransactions: 64
roleGraphInvalidationIsFatal: 1
diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml
index 7affb118495ce..c711806ffa9c3 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml
@@ -94,7 +94,7 @@ executor:
# system running out of WiredTiger write tickets. We intentionally lower the number of
# WiredTiger write tickets available to below the maximum number of database clients to
# trigger this situation at least some of the time.
- storageEngineConcurrencyAdjustmentAlgorithm: ""
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
wiredTigerConcurrentWriteTransactions: 64
roleGraphInvalidationIsFatal: 1
num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml
index 7fb977e6296f3..9853e9f22b632 100644
--- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml
+++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml
@@ -97,7 +97,7 @@ executor:
# system running out of WiredTiger write tickets. We intentionally lower the number of
# WiredTiger write tickets available to below the maximum number of database clients to
# trigger this situation at least some of the time.
- storageEngineConcurrencyAdjustmentAlgorithm: ""
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
wiredTigerConcurrentWriteTransactions: 64
# Setting this parameter to "1" disables cursor caching in WiredTiger, and sets the cache
# size to "1" in MongoDB. This forces all resources to be released when done.
diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml
deleted file mode 100644
index 12d9de079df3c..0000000000000
--- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-test_kind: parallel_fsm_workload_test
-
-selector:
- roots:
- - jstests/concurrency/fsm_workloads/**/*.js
- exclude_files:
- # These workloads implicitly assume that their tid ranges are [0, $config.threadCount). This
- # isn't guaranteed to be true when they are run in parallel with other workloads.
- - jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js
- - jstests/concurrency/fsm_workloads/list_indexes.js
- - jstests/concurrency/fsm_workloads/secondary_reads.js
- - jstests/concurrency/fsm_workloads/update_inc_capped.js
- - jstests/concurrency/fsm_workloads/update_inc_pipeline.js
- # These workloads implicitly assume that their tid ranges are [0, $config.threadCount), as above,
- # but additionally require multiple threads to run, which also isn't guaranteed here.
- - jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
- - jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
- - jstests/concurrency/fsm_workloads/create_index_background.js
-
- # These workloads uses >100MB of data, which can overwhelm test hosts.
- - jstests/concurrency/fsm_workloads/agg_group_external.js
- - jstests/concurrency/fsm_workloads/agg_sort_external.js
- # The findAndModify_update_grow.js workload can cause OOM kills on test hosts.
- - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
-
- # convertToCapped requires a global lock and any background operations on the database causes it
- # to fail due to not finishing quickly enough.
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
- - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
-
- # This workload kills random cursors which takes a collection lock.
- # TODO: SERVER-39939.
- - jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
-
- # This workload may restart running transactions on a different client, causing deadlock if
- # there is a concurrent dropDatabase waiting for the global X lock.
- # TODO: SERVER-37876
- - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
-
- # This workload assumes no locks are taken outside of the workload.
- - jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
-
- # SERVER-43053 These workloads set a failpoint that causes intermittent WriteConflict errors,
- # which presently can cause other simultaneous workloads to fail.
- - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
-
- # These workloads run the compact command, which takes the checkpoint mutex, thus slowing
- # checkpoints. This suite also makes checkpoints slower, and the combination can result in
- # timeouts.
- - jstests/concurrency/fsm_workloads/compact.js
- - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
- - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
-
- # These workloads run the reIndex command, which is only allowed on a standalone node.
- - jstests/concurrency/fsm_workloads/reindex.js
- - jstests/concurrency/fsm_workloads/reindex_background.js
-
- exclude_with_any_tags:
- - requires_sharding
- - kills_random_sessions
- - incompatible_with_concurrency_simultaneous
-
- group_size: 10
- group_count_multiplier: 1.0
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- - ValidateCollections
- tests: true
- config:
- shell_options:
- global_vars:
- TestData:
- skipDropDatabaseOnDatabaseDropPending: true
- setShellParameter: skipShellCursorFinalize=true
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanupConcurrencyWorkloads
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- oplogSize: 1024
- set_parameters:
- # Increase the timeout of the cursor so that the cursor will continue to stay alive even
- # when there is a delay in lock acquisition during a getMore command.
- cursorTimeoutMillis: 3600000
- enableTestCommands: 1
- # Enable aggressive WiredTiger eviction.
- wiredTigerEvictionDebugMode: true
- roleGraphInvalidationIsFatal: 1
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/core.yml b/buildscripts/resmokeconfig/suites/core.yml
index 23a08a1b47dae..23f5f30a7ae23 100644
--- a/buildscripts/resmokeconfig/suites/core.yml
+++ b/buildscripts/resmokeconfig/suites/core.yml
@@ -8,6 +8,8 @@ selector:
# Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the
# 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite.
- jstests/core/txns/**/*.js
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
executor:
archive:
hooks:
diff --git a/buildscripts/resmokeconfig/suites/core_auth.yml b/buildscripts/resmokeconfig/suites/core_auth.yml
index f416c77fa0b5f..5359129be969a 100644
--- a/buildscripts/resmokeconfig/suites/core_auth.yml
+++ b/buildscripts/resmokeconfig/suites/core_auth.yml
@@ -20,6 +20,8 @@ selector:
- jstests/core/**/*[aA]uth*.js
# Commands using UUIDs are not compatible with name-based auth
- jstests/core/**/commands_with_uuid.js
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
exclude_with_any_tags:
# Multiple users cannot be authenticated on one connection within a session.
- creates_and_authenticates_user
diff --git a/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml b/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml
index ccef09e4a248e..b5fba6c7f39a4 100644
--- a/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml
+++ b/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml
@@ -29,7 +29,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load("jstests/libs/override_methods/hide_column_store_indexes_from_get_indexes.js");
hooks:
- class: ValidateCollections
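Editor's note on the eval change above: these suites now assign testingReplication through globalThis rather than a bare identifier. The diff does not state the motivation, but a plausible one is standard ECMAScript behavior: a bare assignment to an undeclared name only creates a global in sloppy mode and throws under strict-mode or module evaluation, while an explicit globalThis property is defined in either case. A minimal illustration of that behavior (not code from the repository; print() is the shell's print helper):

    "use strict";
    try {
        testingReplication = true;          // ReferenceError: undeclared name in strict mode
    } catch (e) {
        print("bare assignment failed: " + e);
    }
    globalThis.testingReplication = true;   // always defines the global property
    print(globalThis.testingReplication);   // true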
diff --git a/buildscripts/resmokeconfig/suites/core_ese.yml b/buildscripts/resmokeconfig/suites/core_ese.yml
index 355dba8f6b6b9..114d6e6a7e712 100644
--- a/buildscripts/resmokeconfig/suites/core_ese.yml
+++ b/buildscripts/resmokeconfig/suites/core_ese.yml
@@ -10,6 +10,9 @@ selector:
exclude_files:
# Transactions are not supported on MongoDB standalone nodes.
- jstests/core/txns/**/*.js
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
+
exclude_with_any_tags:
- does_not_support_encrypted_storage_engine
executor:
diff --git a/buildscripts/resmokeconfig/suites/core_txns.yml b/buildscripts/resmokeconfig/suites/core_txns.yml
index c396fd7027b2b..a6d73f5785f0c 100644
--- a/buildscripts/resmokeconfig/suites/core_txns.yml
+++ b/buildscripts/resmokeconfig/suites/core_txns.yml
@@ -19,7 +19,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
diff --git a/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml b/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml
index a0121a51f3501..e3d5a3be14a3d 100644
--- a/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml
+++ b/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml
@@ -20,7 +20,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
diff --git a/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml b/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml
index f13dc111743fa..29342de466d1a 100644
--- a/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml
+++ b/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml
@@ -30,6 +30,9 @@ selector:
- jstests/core/index/wildcard/compound_wildcard_index_hint.js
# This test expects a certain number of indexes at the start.
- jstests/core/administrative/check_shard_index.js
+ # This test checks explain output, and creating an implicit index causes an
+ # "IndexOptionsConflict" failure that generates an unexpected plan.
+ - jstests/core/query/or_use_clustered_collection.js
# Latency histogram statistics are affected by the creation of an implicit index.
- jstests/core/top.js
# Creating an implicit index results in the following failure: "add index fails, too many indexes
@@ -58,7 +61,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load("jstests/libs/override_methods/implicit_wildcard_indexes.js");
hooks:
- class: ValidateCollections
diff --git a/buildscripts/resmokeconfig/suites/cqf.yml b/buildscripts/resmokeconfig/suites/cqf.yml
index 67a5281be5c1e..71718dc170413 100644
--- a/buildscripts/resmokeconfig/suites/cqf.yml
+++ b/buildscripts/resmokeconfig/suites/cqf.yml
@@ -30,4 +30,5 @@ executor:
set_parameters:
enableTestCommands: 1
featureFlagCommonQueryFramework: true
+ internalQueryCardinalityEstimatorMode: "sampling"
internalQueryFrameworkControl: "forceBonsai"
diff --git a/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml b/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml
index 3dc5fbb67ee72..df27b8b9511ae 100644
--- a/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml
+++ b/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml
@@ -33,7 +33,7 @@ executor:
set_parameters:
enableTestCommands: 1
featureFlagCommonQueryFramework: true
- # This flag disables the fallback path that may hide bugs in CQF.
+ internalQueryCardinalityEstimatorMode: "sampling"
internalQueryFrameworkControl: "forceBonsai"
failpoint.disablePipelineOptimization:
mode: alwaysOn
diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml
new file mode 100644
index 0000000000000..df0e8f7d7c9b6
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml
@@ -0,0 +1,32 @@
+# This is equivalent to the aggregation suite, but runs with experimental CQF features enabled.
+
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/aggregation/**/*.js
+ exclude_files:
+ - jstests/aggregation/extras/*.js
+ - jstests/aggregation/data/*.js
+ exclude_with_any_tags:
+ - cqf_experimental_incompatible
+ - cqf_incompatible
+
+executor:
+ archive:
+ hooks:
+ - ValidateCollections
+ config:
+ shell_options:
+ eval: |
+ load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
+ load("jstests/libs/set_try_bonsai_experimental.js");
+ hooks:
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml
new file mode 100644
index 0000000000000..538506c7ab296
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml
@@ -0,0 +1,42 @@
+# This is equivalent to the core suite, but runs with experimental CQF features enabled.
+
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/core_standalone/**/*.js
+ exclude_files:
+ # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the
+ # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite.
+ - jstests/core/txns/**/*.js
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
+ exclude_with_any_tags:
+ - cqf_experimental_incompatible
+ - cqf_incompatible
+
+executor:
+ archive:
+ hooks:
+ - ValidateCollections
+ config:
+ shell_options:
+ crashOnInvalidBSONError: ""
+ objcheck: ""
+ eval: |
+ load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
+ load("jstests/libs/set_try_bonsai_experimental.js");
+ hooks:
+ - class: ValidateCollections
+ shell_options:
+ global_vars:
+ TestData:
+ skipValidationOnNamespaceNotFound: false
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml
new file mode 100644
index 0000000000000..1b1aa6785d6b2
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml
@@ -0,0 +1,33 @@
+# This is equivalent to the noPassthrough suite, but runs with experimental CQF features enabled.
+
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/noPassthrough/**/*.js
+ - src/mongo/db/modules/*/jstests/hot_backups/*.js
+ - src/mongo/db/modules/*/jstests/live_import/*.js
+ - src/mongo/db/modules/*/jstests/no_passthrough/*.js
+
+ # Self-tests for the Concurrency testing framework are run as part of this test suite.
+ - jstests/concurrency/*.js
+ exclude_files:
+ - jstests/noPassthrough/libs/*.js
+ # Disable inmem_full as per SERVER-27014
+ - jstests/noPassthrough/inmem_full.js
+ exclude_with_any_tags:
+ - cqf_experimental_incompatible
+ - cqf_incompatible
+
+# noPassthrough tests start their own mongods.
+executor:
+ archive:
+ tests:
+ - jstests/noPassthrough/backup*.js
+ - jstests/noPassthrough/oplog_writes_only_permitted_on_standalone.js
+ - jstests/noPassthrough/wt_unclean_shutdown.js
+ - src/mongo/db/modules/enterprise/jstests/hot_backups/*.js
+ config:
+ shell_options:
+ nodb: ''
+ eval: load("jstests/libs/set_try_bonsai_experimental.js");
diff --git a/buildscripts/resmokeconfig/suites/cqf_parallel.yml b/buildscripts/resmokeconfig/suites/cqf_parallel.yml
index 9c8456853c6b9..bed4f702a8359 100644
--- a/buildscripts/resmokeconfig/suites/cqf_parallel.yml
+++ b/buildscripts/resmokeconfig/suites/cqf_parallel.yml
@@ -29,8 +29,9 @@ executor:
set_parameters:
enableTestCommands: 1
featureFlagCommonQueryFramework: true
+ internalQueryCardinalityEstimatorMode: "sampling"
internalQueryFrameworkControl: "forceBonsai"
internalQueryDefaultDOP: 5
# TODO: SERVER-75423: Allow exchange to work independently on the storage concurrency settings.
- storageEngineConcurrencyAdjustmentAlgorithm: ""
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
storageEngineConcurrentReadTransactions: 128
diff --git a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_passthrough.yml
deleted file mode 100644
index 3443f8a7522c5..0000000000000
--- a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/aggregation/**/*.js
- - jstests/core/**/*.js
- exclude_with_any_tags:
- - cqf_incompatible
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the
- # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite.
- - jstests/core/txns/**/*.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- config:
- shell_options:
- crashOnInvalidBSONError: ""
- objcheck: ""
- eval: load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
- hooks:
- - class: ValidateCollections
- shell_options:
- global_vars:
- TestData:
- skipValidationOnNamespaceNotFound: false
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- featureFlagCommonQueryFramework: true
- internalQueryFrameworkControl: "tryBonsai"
diff --git a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
index 0bb4882ae5cab..3e91db56e7484 100755
--- a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml
@@ -14,6 +14,9 @@ selector:
# 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite.
- jstests/core/txns/**/*.js
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
+
# These tests produce different error codes depending on which parser implementation.
- jstests/core/**/sort_with_meta_operator.js
diff --git a/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml
index fc20fd90aef37..aa2c3ea414223 100644
--- a/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml
@@ -18,7 +18,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
hooks:
- class: ValidateCollections
- class: CleanEveryN
diff --git a/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml
index a75f65281ea43..0c80e5610749e 100644
--- a/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml
@@ -36,7 +36,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
diff --git a/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml
index 5941432e45f93..8eadccbde7c18 100644
--- a/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml
@@ -46,7 +46,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_preference_secondary.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml
new file mode 100644
index 0000000000000..435ba271cef78
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml
@@ -0,0 +1,116 @@
+test_kind: js_test
+
+# Cloned from buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
+# to run FCV upgrade downgrade in the background
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/fle2/**/*.js
+ - jstests/aggregation/**/*.js
+ - src/mongo/db/modules/*/jstests/fle/**/*.js
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ # columnstore indexes are under development and cannot be used without enabling the feature flag
+ - featureFlagColumnstoreIndexes
+ # TODO SERVER-68303 Remove this tag.
+ - featureFlagCompoundWildcardIndexes
+ # TODO SERVER-52419 Remove this tag.
+ - featureFlagBulkWriteCommand
+ - featureFlagFLE2CleanupCommand
+ # Transactions are aborted upon fcv upgrade or downgrade.
+ - uses_transactions
+ # Exclude tests that require the latest FCV.
+ - requires_fcv_71
+
+ exclude_files:
+ # The set_param1.js test attempts to compare the response from running the {getParameter: "*"}
+ # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds"
+ # server parameter.
+ - jstests/core/**/set_param1.js
+ # Different explain format
+ - jstests/core/**/or_to_in.js
+ # SERVER-34772 Tailable Cursors are not allowed with snapshot readconcern.
+ - jstests/core/**/awaitdata_getmore_cmd.js
+ - jstests/core/**/getmore_cmd_maxtimems.js
+ - jstests/core/**/tailable_cursor_invalidation.js
+ - jstests/core/**/tailable_getmore_batch_size.js
+
+ # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite.
+ - jstests/core/api/api_version_unstable_indexes.js
+
+ # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite.
+ - jstests/core/timeseries/timeseries_update.js
+ - jstests/core/timeseries/timeseries_update_concurrent.js
+ - jstests/core/timeseries/timeseries_update_one.js
+ - jstests/core/timeseries/timeseries_update_multi.js
+ - jstests/core/timeseries/timeseries_find_and_modify_update.js
+ - jstests/core/timeseries/timeseries_delete_compressed_buckets.js
+ - jstests/core/timeseries/timeseries_bucket_limit_count.js
+
+ # These use "columnstore indexes are under development and cannot be used without enabling the feature flag"
+ - jstests/core/query/null_query_semantics.js
+ - jstests/core/query/project/projection_semantics.js
+ - jstests/core/index/hidden_index.js
+
+ # TODO: Remove after fixing SERVER-78201: the following tests fail because a command is received without an explicit readConcern.
+ - jstests/aggregation/sources/densify/internal_parse.js
+ - jstests/aggregation/api_version_stage_allowance_checks.js
+
+ # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command.
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js
+
+ # These tests use a resumeToken from previous calls, so an FCV change invalidates the token and makes them fail.
+ - jstests/core/resume_query_from_non_existent_record.js
+ - jstests/core/resume_query.js
+ - jstests/core/find_with_resume_after_param.js
+
+ # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite.
+ - jstests/core/timeseries/bucket_timestamp_rounding.js
+ - jstests/core/timeseries/timeseries_filter_extended_range.js
+
+ # Expected failure because the command count is imprecise due to potential retries of the index build.
+ - jstests/core/operation_latency_histogram.js
+
+ # Expected failures due to unexpected query execution stats caused by operations restarted during FCV upgrade.
+ - jstests/aggregation/sources/lookup/lookup_query_stats.js
+ - jstests/aggregation/sources/facet/facet_stats.js
+ - jstests/aggregation/sources/unionWith/unionWith_query_stats.js
+
+ # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail.
+ - jstests/core/role_management_helpers.js
+ - jstests/core/roles_info.js
+ - jstests/core/views/views_all_commands.js
+
+ # Queryable encryption uses internal transactions (which are aborted on fcv upgrade/downgrade)
+ - jstests/core/queryable_encryption/**/*.js
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - CheckReplOplogs
+ - ValidateCollections
+ - FCVUpgradeDowngradeInBackground
+ config:
+ shell_options:
+ eval: >-
+ globalThis.testingReplication = true;
+ load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js");
+ hooks:
+ - class: CheckReplOplogs
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: FCVUpgradeDowngradeInBackground
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ReplicaSetFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ disableTransitionFromLatestToLastContinuous: False
+ num_nodes: 2
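Editor's note: the FCVUpgradeDowngradeInBackground hook named above drives feature-compatibility-version changes while the core tests run, which is why this suite excludes tests that rely on transactions, resume tokens, or latest-FCV behavior. The hook itself is a resmoke Python hook; the mongo-shell sketch below only illustrates the kind of command cycle it alternates. The version strings and the confirm field are assumptions (newer servers require confirmation), not the hook's actual code:

    // Hypothetical FCV downgrade/upgrade cycle; "7.0"/"7.1" and 'confirm' are placeholders.
    const admin = db.getSiblingDB("admin");
    assert.commandWorked(
        admin.runCommand({setFeatureCompatibilityVersion: "7.0", confirm: true}));
    assert.commandWorked(
        admin.runCommand({setFeatureCompatibilityVersion: "7.1", confirm: true}));

Any operation that straddles one of these transitions can be interrupted, which is what the retry_aborted_db_and_index_creation.js override and the exclusion lists above compensate for.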
diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml
new file mode 100644
index 0000000000000..33ace6259369d
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml
@@ -0,0 +1,206 @@
+test_kind: js_test
+
+# Cloned from buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+# to run FCV upgrade downgrade in the background
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/fle2/**/*.js
+ - jstests/aggregation/**/*.js
+ - src/mongo/db/modules/*/jstests/fle/**/*.js
+
+ exclude_files:
+ # The following tests fail because a certain command or functionality is not supported on
+ # mongos. This command or functionality is placed in a comment next to the failing test.
+ - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
+ - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
+ - jstests/core/**/check_shard_index.js # checkShardingIndex.
+ - jstests/core/**/collection_truncate.js # emptycapped.
+ - jstests/core/**/compact_keeps_indexes.js # compact.
+ - jstests/core/**/currentop.js # uses fsync.
+ - jstests/core/**/dbhash.js # dbhash.
+ - jstests/core/**/dbhash2.js # dbhash.
+ - jstests/core/**/fsync.js # uses fsync.
+ - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
+ - jstests/core/**/geo_update_btree2.js # notablescan.
+ - jstests/core/**/index9.js # "local" database.
+ - jstests/core/**/queryoptimizera.js # "local" database.
+ - jstests/core/**/stages*.js # stageDebug.
+ - jstests/core/**/startup_log.js # "local" database.
+ - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
+ - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
+ - jstests/core/**/tailable_skip_limit.js # capped collections.
+ - jstests/core/**/top.js # top.
+ # The following tests fail because mongos behaves differently from mongod when testing certain
+ # functionality. The differences are in a comment next to the failing test.
+ - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
+ - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
+ - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
+ - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
+ # The following tests fail because they count indexes. These counts do not take into account the
+ # additional hashed shard key indexes that are automatically added by this passthrough.
+ - jstests/core/**/apitest_dbcollection.js
+ - jstests/core/**/bad_index_plugin.js
+ - jstests/core/**/create_indexes.js
+ - jstests/core/**/list_indexes_non_existent_ns.js
+ - jstests/core/**/mr_preserve_indexes.js
+ # The following tests fail because they expect no databases to be created. However a DB is created
+ # automatically when we shard a collection.
+ - jstests/core/**/dbcase.js
+ - jstests/core/**/dbcase2.js
+ - jstests/core/**/no_db_created.js
+ - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
+ # These tests fail because sharded clusters do not clean up correctly after failed index builds.
+ # See SERVER-33207 as an example.
+ - jstests/core/**/geo_borders.js
+ # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
+ # queries with a limit or for distinct commands.
+ - jstests/core/**/distinct_index1.js
+ - jstests/core/**/explain1.js
+ - jstests/core/**/explain4.js
+ - jstests/core/**/sortk.js
+ # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
+ # incorrect on sharded collections.
+ - jstests/core/**/explain_count.js
+ - jstests/core/**/explain_server_params.js
+ # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
+ - jstests/core/**/expr_index_use.js
+ - jstests/core/**/index_multikey.js
+ - jstests/core/**/optimized_match_explain.js
+ - jstests/core/**/sort_array.js
+
+ # Excludes from fcv_upgrade_downgrade_jscore_passthrough.yml
+ #
+ # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite.
+ - jstests/core/api/api_version_unstable_indexes.js
+
+ # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite.
+ - jstests/core/timeseries/timeseries_update.js
+ - jstests/core/timeseries/timeseries_update_concurrent.js
+ - jstests/core/timeseries/timeseries_update_one.js
+ - jstests/core/timeseries/timeseries_update_multi.js
+ - jstests/core/timeseries/timeseries_find_and_modify_update.js
+ - jstests/core/timeseries/timeseries_delete_compressed_buckets.js
+ - jstests/core/timeseries/timeseries_bucket_limit_count.js
+
+ # These use "columnstore indexes are under development and cannot be used without enabling the feature flag"
+ - jstests/core/query/null_query_semantics.js
+ - jstests/core/query/project/projection_semantics.js
+ - jstests/core/index/hidden_index.js
+
+ # TODO: Remove after fixing SERVER-78201: the following tests fail because a command is received without an explicit readConcern.
+ - jstests/aggregation/sources/densify/internal_parse.js
+ - jstests/aggregation/api_version_stage_allowance_checks.js
+
+ # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command.
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js
+
+ # These tests use a resumeToken from previous calls, so an FCV change invalidates the token and makes them fail.
+ - jstests/core/resume_query_from_non_existent_record.js
+ - jstests/core/resume_query.js
+ - jstests/core/find_with_resume_after_param.js
+
+ # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite.
+ - jstests/core/timeseries/bucket_timestamp_rounding.js
+ - jstests/core/timeseries/timeseries_explicit_unpack_bucket.js
+
+ # Expected failure because the command count is imprecise due to potential retries of the index build.
+ - jstests/core/operation_latency_histogram.js
+
+ # Expected failures due to unexpected query execution stats caused by operations restarted during FCV upgrade.
+ - jstests/aggregation/sources/lookup/lookup_query_stats.js
+ - jstests/aggregation/sources/facet/facet_stats.js
+ - jstests/aggregation/sources/unionWith/unionWith_query_stats.js
+
+ # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail.
+ - jstests/core/role_management_helpers.js
+ - jstests/core/roles_info.js
+ - jstests/core/views/views_all_commands.js
+
+ # Sharding-specific failures:
+ #
+ # $unionWith explain output does not check whether the collection is sharded in a sharded
+ # cluster.
+ - jstests/aggregation/sources/unionWith/unionWith_explain.js
+ # Cannot specify runtime constants option to a mongos
+ - jstests/aggregation/expressions/internal_js_emit_with_scope.js
+ - jstests/aggregation/accumulators/internal_js_reduce_with_scope.js
+ # The tests below depend on internal transactions which are expected to get interrupted during an FCV upgrade or downgrade, but they do not have the 'uses_transactions' tag so are not excluded by default in this suite.
+ - jstests/aggregation/sources/lookup/lookup_non_correlated.js
+ - jstests/core/ddl/collection_uuid_index_commands.js
+ # Queryable encryption tests create internal transactions which are expected to fail.
+ - jstests/core/queryable_encryption/*.js
+ # TODO (SERVER-78753): setFeatureCompatibilityVersion times out waiting for replication
+ - jstests/core/transaction_too_large_for_cache.js
+ # TODO (SERVER-77910): Find out why spill_to_disk.js fails to spill to disk after FCV downgrade.
+ - jstests/aggregation/spill_to_disk.js
+ # TODO (SERVER-32311): These tests use getAggPlanStage(), which can't handle sharded explain output.
+ - jstests/aggregation/match_swapping_renamed_fields.js
+ - jstests/aggregation/use_query_project_and_sort.js
+ - jstests/aggregation/use_query_projection.js
+ - jstests/aggregation/use_query_sort.js
+ # TODO: Remove when SERVER-23229 is fixed.
+ - jstests/aggregation/bugs/groupMissing.js
+ - jstests/aggregation/sources/graphLookup/variables.js
+ # TODO (SERVER-77935): Investigate timeout from fcv downgrade in jstests/core/query/push/push2.js.
+ - jstests/core/query/push/push2.js
+ - jstests/core/write/update/update_addToSet2.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ - assumes_against_mongod_not_mongos
+ # Tests tagged with the following will fail because they assume collections are not sharded.
+ - assumes_no_implicit_collection_creation_after_drop
+ - assumes_no_implicit_index_creation
+ - assumes_unsharded_collection
+ - cannot_create_unique_index_when_using_hashed_shard_key
+ # system.profile collection doesn't exist on mongos.
+ - requires_profiling
+ # columnstore indexes are under development and cannot be used without enabling the feature flag
+ - featureFlagColumnstoreIndexes
+ # TODO SERVER-52419 Remove this tag.
+ - featureFlagBulkWriteCommand
+ - featureFlagFLE2CleanupCommand
+ # Transactions are aborted upon fcv upgrade or downgrade.
+ - uses_transactions
+ # Exclude tests that require the latest FCV.
+ - requires_fcv_71
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - CheckMetadataConsistencyInBackground
+ - ValidateCollections
+ - FCVUpgradeDowngradeInBackground
+ config:
+ shell_options:
+ eval: >-
+ load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js");
+ load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
+ hooks:
+ - class: CheckReplDBHash
+ - class: CheckMetadataConsistencyInBackground
+ - class: ValidateCollections
+ - class: FCVUpgradeDowngradeInBackground
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ num_shards: 2
+ enable_balancer: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ disableTransitionFromLatestToLastContinuous: False
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ disableTransitionFromLatestToLastContinuous: False
+ num_rs_nodes_per_shard: 2
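Editor's note: this suite, like the one above, filters by exclude_with_any_tags. resmoke reads those tags from a @tags annotation in the leading comment block of each jstest, so a test opts out of suites like this one by declaring the relevant tag; a hypothetical example of that convention (the test and its tags are illustrative, assumed from common jstests practice, not taken from this diff):

    /**
     * Hypothetical test that relies on latest-FCV-only behavior, so it carries
     * tags this suite excludes.
     * @tags: [requires_fcv_71, uses_transactions]
     */
    // ... test body would go here ...

Tests without such tags but with known incompatibilities are listed explicitly under exclude_files instead.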
diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml
new file mode 100644
index 0000000000000..314d1a504d9aa
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml
@@ -0,0 +1,160 @@
+test_kind: js_test
+
+# Cloned from buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+# to run FCV upgrade downgrade in the background
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/fle2/**/*.js
+ - jstests/aggregation/**/*.js
+ - src/mongo/db/modules/*/jstests/fle/**/*.js
+
+ exclude_files:
+ # The following tests fail because a certain command or functionality is not supported on
+ # mongos. This command or functionality is placed in a comment next to the failing test.
+ - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/**/check_shard_index.js # checkShardingIndex.
+ - jstests/core/**/collection_truncate.js # emptycapped.
+ - jstests/core/**/compact_keeps_indexes.js # compact.
+ - jstests/core/**/currentop.js # uses fsync.
+ - jstests/core/**/dbhash.js # dbhash.
+ - jstests/core/**/dbhash2.js # dbhash.
+ - jstests/core/**/fsync.js # uses fsync.
+ - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
+ - jstests/core/**/geo_update_btree2.js # notablescan.
+ - jstests/core/**/index9.js # "local" database.
+ - jstests/core/**/queryoptimizera.js # "local" database.
+ - jstests/core/**/stages*.js # stageDebug.
+ - jstests/core/**/startup_log.js # "local" database.
+ - jstests/core/**/top.js # top.
+ # The following tests fail because mongos behaves differently from mongod when testing certain
+ # functionality. The differences are in a comment next to the failing test.
+ - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos.
+ - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
+ - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
+ - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
+ - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
+ - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain()
+ # The following tests fail because of divergent dropCollection behavior between standalones and
+ # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters
+ # we expect a second drop to return status OK.
+ - jstests/core/**/explain_upsert.js
+
+ # Excludes from fcv_upgrade_downgrade_jscore_passthrough.yml
+ #
+ # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite.
+ - jstests/core/api/api_version_unstable_indexes.js
+
+ # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite.
+ - jstests/core/timeseries/timeseries_update.js
+ - jstests/core/timeseries/timeseries_update_concurrent.js
+ - jstests/core/timeseries/timeseries_update_one.js
+ - jstests/core/timeseries/timeseries_update_multi.js
+ - jstests/core/timeseries/timeseries_find_and_modify_update.js
+ - jstests/core/timeseries/timeseries_delete_compressed_buckets.js
+ - jstests/core/timeseries/timeseries_bucket_limit_count.js
+
+ # These use "columnstore indexes are under development and cannot be used without enabling the feature flag"
+ - jstests/core/query/null_query_semantics.js
+ - jstests/core/query/project/projection_semantics.js
+ - jstests/core/index/hidden_index.js
+
+ # TODO: Remove after fixing SERVER-78201: the following tests fail because a command is received without an explicit readConcern.
+ - jstests/aggregation/sources/densify/internal_parse.js
+ - jstests/aggregation/api_version_stage_allowance_checks.js
+
+ # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command.
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js
+
+ # These tests use a resumeToken from previous calls, so an FCV change invalidates the token and makes them fail.
+ - jstests/core/resume_query_from_non_existent_record.js
+ - jstests/core/resume_query.js
+ - jstests/core/find_with_resume_after_param.js
+
+ # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite.
+ - jstests/core/timeseries/bucket_timestamp_rounding.js
+ - jstests/core/timeseries/timeseries_explicit_unpack_bucket.js
+
+ # Expected failure because the command count is imprecise due to potential retries of the index build.
+ - jstests/core/operation_latency_histogram.js
+
+ # Expected failures due to unexpected query execution stats caused by operations restarted during FCV upgrade.
+ - jstests/aggregation/sources/lookup/lookup_query_stats.js
+ - jstests/aggregation/sources/facet/facet_stats.js
+ - jstests/aggregation/sources/unionWith/unionWith_query_stats.js
+
+ # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail.
+ - jstests/core/role_management_helpers.js
+ - jstests/core/roles_info.js
+ - jstests/core/views/views_all_commands.js
+
+ # Sharding-specific failures:
+ #
+ # $unionWith explain output does not check whether the collection is sharded in a sharded
+ # cluster.
+ - jstests/aggregation/sources/unionWith/unionWith_explain.js
+ # Cannot specify runtime constants option to a mongos
+ - jstests/aggregation/expressions/internal_js_emit_with_scope.js
+ - jstests/aggregation/accumulators/internal_js_reduce_with_scope.js
+ # The tests below depend on internal transactions which are expected to get interrupted during an FCV upgrade or downgrade, but they do not have the 'uses_transactions' tag so are not excluded by default in this suite.
+ - jstests/aggregation/sources/lookup/lookup_non_correlated.js
+ - jstests/core/ddl/collection_uuid_index_commands.js
+ # Queryable encryption tests create internal transactions which are expected to fail.
+ - jstests/core/queryable_encryption/*.js
+ # TODO (SERVER-78753): setFeatureCompatibilityVersion times out waiting for replication
+ - jstests/core/transaction_too_large_for_cache.js
+ # TODO (SERVER-77910): Find out why spill_to_disk.js fails to spill to disk after FCV downgrade.
+ - jstests/aggregation/spill_to_disk.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ - assumes_against_mongod_not_mongos
+ # system.profile collection doesn't exist on mongos.
+ - requires_profiling
+ # columnstore indexes are under development and cannot be used without enabling the feature flag
+ - featureFlagColumnstoreIndexes
+ # TODO SERVER-52419 Remove this tag.
+ - featureFlagBulkWriteCommand
+ - featureFlagFLE2CleanupCommand
+ # Transactions are aborted upon fcv upgrade or downgrade.
+ - uses_transactions
+ # Exclude tests that require the latest FCV.
+ - requires_fcv_71
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - CheckMetadataConsistencyInBackground
+ - ValidateCollections
+ - FCVUpgradeDowngradeInBackground
+ config:
+ shell_options:
+ eval: >-
+ load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js");
+ hooks:
+ - class: CheckReplDBHash
+ - class: CheckMetadataConsistencyInBackground
+ - class: ValidateCollections
+ - class: FCVUpgradeDowngradeInBackground
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ num_shards: 1
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ disableTransitionFromLatestToLastContinuous: False
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ disableTransitionFromLatestToLastContinuous: False
+ num_rs_nodes_per_shard: 2
+ enable_sharding:
+ - test
diff --git a/buildscripts/resmokeconfig/suites/fle2.yml b/buildscripts/resmokeconfig/suites/fle2.yml
index c109b6542e4eb..fa8d55e2f6b83 100644
--- a/buildscripts/resmokeconfig/suites/fle2.yml
+++ b/buildscripts/resmokeconfig/suites/fle2.yml
@@ -11,10 +11,6 @@ executor:
archive:
hooks:
- ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true; testingFLE2Range = true;"
- setShellParameter: featureFlagFLE2Range=true
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
@@ -26,6 +22,5 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- featureFlagFLE2Range: true
# Use a 2-node replica set.
num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml b/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml
index d9176f718b838..8b1582ec19f7f 100644
--- a/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml
+++ b/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml
@@ -12,10 +12,6 @@ executor:
archive:
hooks:
- ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true; testingFLE2Range = true;"
- setShellParameter: featureFlagFLE2Range=true
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
@@ -27,7 +23,6 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- featureFlagFLE2Range: true
internalQueryFLEAlwaysUseEncryptedCollScanMode: 1
# Use a 2-node replica set.
num_nodes: 2
diff --git a/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml b/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml
index 894cef89d1d37..cabc1903d9526 100644
--- a/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml
+++ b/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml
@@ -18,5 +18,3 @@ executor:
global_vars:
TestData:
useFle2Protocol: true
- setParametersMongocryptd:
- featureFlagFLE2Range: true
diff --git a/buildscripts/resmokeconfig/suites/fle2_sharding.yml b/buildscripts/resmokeconfig/suites/fle2_sharding.yml
index c4658aedca13c..76c6f9a061fec 100644
--- a/buildscripts/resmokeconfig/suites/fle2_sharding.yml
+++ b/buildscripts/resmokeconfig/suites/fle2_sharding.yml
@@ -3,6 +3,8 @@ selector:
roots:
- jstests/fle2/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
+ exclude_files:
+ - src/mongo/db/modules/enterprise/jstests/fle2/bulk_write_insert.js # TODO SERVER-77497: add mongos support
exclude_with_any_tags:
# Don't run tests that require the encrypted collscan mode in this suite.
- requires_fle2_encrypted_collscan
@@ -12,13 +14,10 @@ executor:
hooks:
- CheckReplDBHash
- ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = false; testingFLE2Range = true;"
- setShellParameter: featureFlagFLE2Range=true
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
@@ -29,7 +28,6 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
- featureFlagFLE2Range: true
num_rs_nodes_per_shard: 2
enable_sharding:
- test
diff --git a/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml b/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml
index 8760caa26bc71..dd29e251b3d59 100644
--- a/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml
+++ b/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml
@@ -3,23 +3,23 @@ selector:
roots:
- jstests/fle2/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
+ exclude_files:
+ - src/mongo/db/modules/enterprise/jstests/fle2/bulk_write_insert.js # TODO SERVER-77497: add mongos support
exclude_with_any_tags:
# Not compatible with tests the expect fle to always using $in in queries,
# i.e. verify explain output
- requires_fle2_in_always
+ - fle2_no_mongos
executor:
archive:
hooks:
- CheckReplDBHash
- ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = false; testingFLE2Range = true;"
- setShellParameter: featureFlagFLE2Range=true
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
@@ -32,7 +32,6 @@ executor:
set_parameters:
enableTestCommands: 1
internalQueryFLEAlwaysUseEncryptedCollScanMode: 1
- featureFlagFLE2Range: true
num_rs_nodes_per_shard: 2
enable_sharding:
- test
diff --git a/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml b/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml
index b6bae6b1ab4c6..98f1a82048fd4 100644
--- a/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml
+++ b/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml
@@ -34,19 +34,19 @@ executor:
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: CheckReplDBHash
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: ValidateCollections
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
fixture:
class: ReplicaSetFixture
diff --git a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
index 7aabaa4d96e74..7c86fd80cf518 100644
--- a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
+++ b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml
@@ -16,6 +16,7 @@ executor:
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
fixture:
class: ShardedClusterFixture
mongod_options:
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml
deleted file mode 100644
index 02b9116d197c0..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # The set_param1.js test attempts to compare the response from running the {getParameter: "*"}
- # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds"
- # server parameter.
- - jstests/core/**/set_param1.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal
- # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same
- # time, the logical session cache refresh thread will flush these sessions to disk, creating more
- # opLog entries. To avoid this infinite loop, we will denylist the test from this suite.
- - jstests/core/**/awaitdata_getmore_cmd.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- - CheckReplOplogs
- - ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true;"
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: CheckReplOplogs
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index 1cb8d42ce2d2a..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # The set_param1.js test attempts to compare the response from running the {getParameter: "*"}
- # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds"
- # server parameter.
- - jstests/core/**/set_param1.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal
- # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same
- # time, the logical session cache refresh thread will flush these sessions to disk, creating more
- # opLog entries. To avoid this infinite loop, we will denylist the test from this suite.
- - jstests/core/**/awaitdata_getmore_cmd.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- - CheckReplOplogs
- - ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true;"
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: CheckReplOplogs
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 10000
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index 95e276ed112cf..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # The set_param1.js test attempts to compare the response from running the {getParameter: "*"}
- # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds"
- # server parameter.
- - jstests/core/**/set_param1.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal
- # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same
- # time, the logical session cache refresh thread will flush these sessions to disk, creating more
- # opLog entries. To avoid this infinite loop, we will denylist the test from this suite.
- - jstests/core/**/awaitdata_getmore_cmd.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - CheckReplDBHash
- - CheckReplOplogs
- - ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true;"
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: CheckReplOplogs
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 1000
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml
deleted file mode 100644
index a8434414a2282..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # The set_param1.js test attempts to compare the response from running the {getParameter: "*"}
- # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds"
- # server parameter.
- - jstests/core/**/set_param1.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal
- # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same
- # time, the logical session cache refresh thread will flush these sessions to disk, creating more
- # opLog entries. To prevent this infinite loop, we will denylist the test from this suite.
- - jstests/core/**/awaitdata_getmore_cmd.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
-
-executor:
- archive:
- hooks:
- - CheckReplDBHashInBackground
- - ValidateCollectionsInBackground
- - CheckReplDBHash
- - CheckReplOplogs
- - ValidateCollections
- config:
- shell_options:
- eval: "testingReplication = true;"
- hooks:
- # The CheckReplDBHash hook waits until all operations have replicated to and have been applied
- # on the secondaries, so we run the ValidateCollections hook after it to ensure we're
- # validating the entire contents of the collection.
- - class: CheckReplDBHashInBackground
- - class: ValidateCollectionsInBackground
- - class: CheckReplOplogs
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ReplicaSetFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml
deleted file mode 100644
index 0e9f1c4d1a099..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,116 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml.
- - jstests/core/txns/**/*.js
- # The following tests fail because a certain command or functionality is not supported by
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
- - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
- - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
- - jstests/core/**/tailable_skip_limit.js # capped collections.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- # The following tests fail because they count indexes. These counts do not take into account the
- # additional hashed shard key indexes that are automatically added by this passthrough.
- - jstests/core/**/apitest_dbcollection.js
- - jstests/core/**/bad_index_plugin.js
- - jstests/core/**/create_indexes.js
- - jstests/core/**/list_indexes_non_existent_ns.js
- - jstests/core/**/mr_preserve_indexes.js
- # The following tests fail because they expect no databases to be created. However a DB is created
- # automatically when we shard a collection.
- - jstests/core/**/dbcase.js
- - jstests/core/**/dbcase2.js
- - jstests/core/**/no_db_created.js
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- # These tests fail because sharded clusters do not clean up correctly after failed index builds.
- # See SERVER-33207 as an example.
- - jstests/core/**/geo_borders.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
- # queries with a limit or for distinct commands.
- - jstests/core/**/distinct_index1.js
- - jstests/core/**/explain1.js
- - jstests/core/**/explain4.js
- - jstests/core/**/sortk.js
- # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
- # incorrect on sharded collections.
- - jstests/core/**/explain_count.js
- - jstests/core/**/explain_server_params.js
- # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
- - jstests/core/**/expr_index_use.js
- - jstests/core/**/index_multikey.js
- - jstests/core/**/optimized_match_explain.js
- - jstests/core/**/sort_array.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - CheckMetadataConsistencyInBackground
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: CheckMetadataConsistencyInBackground
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml
deleted file mode 100644
index 3464f572f79be..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/txns/**/*.js
- exclude_files:
- # Profile can only be run against the admin database on mongos.
- - jstests/core/txns/transactions_profiling.js
- - jstests/core/txns/transactions_profiling_with_drops.js
-
- # Implicitly creates a database through a collection rename, which does not work in a sharded
- # cluster.
- - jstests/core/txns/transactions_block_ddl.js
-
- # transactionLifetimeLimitSeconds parameter is not available in mongos.
- - jstests/core/txns/abort_expired_transaction.js
- - jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
- - jstests/core/txns/kill_op_on_txn_expiry.js
-
- # Uses hangAfterCollectionInserts failpoint not available on mongos.
- - jstests/core/txns/speculative_snapshot_includes_all_writes.js
-
- # View tests aren't expected to work when collections are implicitly sharded.
- - jstests/core/txns/view_reads_in_transaction.js
-
- # These workloads explicitly create collections inside multi-document transactions. These are
- # non-idempotent operations, and the implicit collection sharding logic upon collection access
- # results in premature collection creation, causing the workloads to fail.
- - jstests/core/txns/create_collection.js
- - jstests/core/txns/create_collection_parallel.js
- - jstests/core/txns/create_indexes.js
- - jstests/core/txns/create_indexes_parallel.js
-
- exclude_with_any_tags:
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # Transactions are not allowed to operate on capped collections.
- - requires_capped
- # Prepare is not a command on mongos.
- - uses_prepare_transaction
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index 076717acd11f0..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,116 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml.
- - jstests/core/txns/**/*.js
- # The following tests fail because a certain command or functionality is not supported by
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
- - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
- - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
- - jstests/core/**/tailable_skip_limit.js # capped collections.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- # The following tests fail because they count indexes. These counts do not take into account the
- # additional hashed shard key indexes that are automatically added by this passthrough.
- - jstests/core/**/apitest_dbcollection.js
- - jstests/core/**/bad_index_plugin.js
- - jstests/core/**/create_indexes.js
- - jstests/core/**/list_indexes_non_existent_ns.js
- - jstests/core/**/mr_preserve_indexes.js
- # The following tests fail because they expect no databases to be created. However a DB is created
- # automatically when we shard a collection.
- - jstests/core/**/dbcase.js
- - jstests/core/**/dbcase2.js
- - jstests/core/**/no_db_created.js
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- # These tests fail because sharded clusters do not clean up correctly after failed index builds.
- # See SERVER-33207 as an example.
- - jstests/core/**/geo_borders.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
- # queries with a limit or for distinct commands.
- - jstests/core/**/distinct_index1.js
- - jstests/core/**/explain1.js
- - jstests/core/**/explain4.js
- - jstests/core/**/sortk.js
- # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
- # incorrect on sharded collections.
- - jstests/core/**/explain_count.js
- - jstests/core/**/explain_server_params.js
- # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
- - jstests/core/**/expr_index_use.js
- - jstests/core/**/index_multikey.js
- - jstests/core/**/optimized_match_explain.js
- - jstests/core/**/sort_array.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - CheckMetadataConsistencyInBackground
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: CheckMetadataConsistencyInBackground
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 10000
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 10000
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index 26838b46c0445..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,116 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml.
- - jstests/core/txns/**/*.js
- # The following tests fail because a certain command or functionality is not supported by
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
- - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
- - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
- - jstests/core/**/tailable_skip_limit.js # capped collections.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- # The following tests fail because they count indexes. These counts do not take into account the
- # additional hashed shard key indexes that are automatically added by this passthrough.
- - jstests/core/**/apitest_dbcollection.js
- - jstests/core/**/bad_index_plugin.js
- - jstests/core/**/create_indexes.js
- - jstests/core/**/list_indexes_non_existent_ns.js
- - jstests/core/**/mr_preserve_indexes.js
- # The following tests fail because they expect no databases to be created. However a DB is created
- # automatically when we shard a collection.
- - jstests/core/**/dbcase.js
- - jstests/core/**/dbcase2.js
- - jstests/core/**/no_db_created.js
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- # These tests fail because sharded clusters do not clean up correctly after failed index builds.
- # See SERVER-33207 as an example.
- - jstests/core/**/geo_borders.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
- # queries with a limit or for distinct commands.
- - jstests/core/**/distinct_index1.js
- - jstests/core/**/explain1.js
- - jstests/core/**/explain4.js
- - jstests/core/**/sortk.js
- # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
- # incorrect on sharded collections.
- - jstests/core/**/explain_count.js
- - jstests/core/**/explain_server_params.js
- # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
- - jstests/core/**/expr_index_use.js
- - jstests/core/**/index_multikey.js
- - jstests/core/**/optimized_match_explain.js
- - jstests/core/**/sort_array.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - CheckMetadataConsistencyInBackground
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: CheckMetadataConsistencyInBackground
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 1000
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 1000
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml
deleted file mode 100644
index fbc8e2736e30f..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,114 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml.
- - jstests/core/txns/**/*.js
- # The following tests fail because a certain command or functionality is not supported by
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
- - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
- - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
- - jstests/core/**/tailable_skip_limit.js # capped collections.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- # The following tests fail because they count indexes. These counts do not take into account the
- # additional hashed shard key indexes that are automatically added by this passthrough.
- - jstests/core/**/apitest_dbcollection.js
- - jstests/core/**/bad_index_plugin.js
- - jstests/core/**/create_indexes.js
- - jstests/core/**/list_indexes_non_existent_ns.js
- - jstests/core/**/mr_preserve_indexes.js
- # The following tests fail because they expect no databases to be created. However a DB is created
- # automatically when we shard a collection.
- - jstests/core/**/dbcase.js
- - jstests/core/**/dbcase2.js
- - jstests/core/**/no_db_created.js
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- # These tests fail because sharded clusters do not clean up correctly after failed index builds.
- # See SERVER-33207 as an example.
- - jstests/core/**/geo_borders.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
- # queries with a limit or for distinct commands.
- - jstests/core/**/distinct_index1.js
- - jstests/core/**/explain1.js
- - jstests/core/**/explain4.js
- - jstests/core/**/sortk.js
- # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
- # incorrect on sharded collections.
- - jstests/core/**/explain_count.js
- - jstests/core/**/explain_server_params.js
- # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
- - jstests/core/**/expr_index_use.js
- - jstests/core/**/index_multikey.js
- - jstests/core/**/optimized_match_explain.js
- - jstests/core/**/sort_array.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - CheckMetadataConsistencyInBackground
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: CheckMetadataConsistencyInBackground
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml
deleted file mode 100644
index 2b61effb3a172..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes.
- - jstests/core/txns/**/*.js
- # This test expects a fixed number of operations. The logical session cache will perform its own
- # operations, inflating the number of operations and causing the test to fail.
- - jstests/core/**/opcounters_write_cmd.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- config: {}
- hooks:
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 100
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index 4d3ef6b270192..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes.
- - jstests/core/txns/**/*.js
- # This test expects a fixed number of operations. The logical session cache will perform its own
- # operations, inflating the number of operations and causing the test to fail.
- - jstests/core/**/opcounters_write_cmd.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- config: {}
- hooks:
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 10000
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml
deleted file mode 100644
index fa7b9c13daa97..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes.
- - jstests/core/txns/**/*.js
- # This test expects a fixed number of operations. The logical session cache will perform its own
- # operations, inflating the number of operations and causing the test to fail.
- - jstests/core/**/opcounters_write_cmd.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- config: {}
- hooks:
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
- logicalSessionRefreshMillis: 1000
diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml
deleted file mode 100644
index 5f91f3b17f3b4..0000000000000
--- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes.
- - jstests/core/txns/**/*.js
- # This test expects a fixed number of operations. The logical session cache will perform its own
- # operations, inflating the number of operations and causing the test to fail.
- - jstests/core/**/opcounters_write_cmd.js
- # These tests expect the logical session cache refresh thread to be turned off, so that refreshes
- # can be triggered deterministically.
- - jstests/core/**/list_all_local_sessions.js
- - jstests/core/**/list_all_sessions.js
- - jstests/core/**/list_sessions.js
- # These tests verify that an expected number of update operations were tracked in the server
- # status metrics, but the logical session cache refresh causes additional updates to be recorded.
- - jstests/core/**/find_and_modify_metrics.js
- - jstests/core/**/update_metrics.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- config: {}
- hooks:
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- disableLogicalSessionCacheRefresh: false
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
index 83e2b67dc4ab7..3bdb9d440c66d 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml
@@ -287,7 +287,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -313,6 +313,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
index c8b08e5fe5ac8..77fa867b7bcd0 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml
@@ -304,7 +304,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -330,6 +330,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
index 407bb4c391365..42fccf797d7c9 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml
@@ -326,8 +326,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
# "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..."
- uses_parallel_shell
# system.profile collection doesn't exist on mongos. Also, transactions are not allowed to operate
@@ -358,9 +356,9 @@ executor:
# shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize tags
# to denylist tests that uses them unsafely.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -395,6 +393,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
+ - class: CheckOrphansDeleted
- class: ValidateCollections
shell_options:
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
index 8e9f5447f228c..f1894ffc00098 100644
--- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml
@@ -326,8 +326,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
# "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..."
- uses_parallel_shell
# system.profile collection doesn't exist on mongos.
@@ -356,9 +354,9 @@ executor:
# shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize tags
# to denylist tests that uses them unsafely.
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -393,6 +391,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
index 1dbf0f1f49710..dfb40cab4a249 100644
--- a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
+++ b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml
@@ -315,7 +315,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
ImplicitlyShardAccessCollSettings.setMode(ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard);
load('jstests/libs/override_methods/enable_sessions.js');
@@ -337,6 +337,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: DropShardedCollections
- class: CleanEveryN
n: 20
diff --git a/buildscripts/resmokeconfig/suites/multiversion.yml b/buildscripts/resmokeconfig/suites/multiversion.yml
index aa7939823f52d..b7d4e95b7516c 100644
--- a/buildscripts/resmokeconfig/suites/multiversion.yml
+++ b/buildscripts/resmokeconfig/suites/multiversion.yml
@@ -4,13 +4,11 @@ selector:
roots:
- jstests/multiVersion/**/*.js
- src/mongo/db/modules/*/jstests/hot_backups/multiVersion/*.js
+ - src/mongo/db/modules/*/jstests/audit/multiVersion/*.js
exclude_files:
# Do not execute files with helper functions.
- jstests/multiVersion/libs/*.js
- # TODO: SERVER-21578
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
-
# TODO: SERVER-28104
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
diff --git a/buildscripts/resmokeconfig/suites/multiversion_auth.yml b/buildscripts/resmokeconfig/suites/multiversion_auth.yml
index aacf5eb8d943c..6d47fbe873d52 100644
--- a/buildscripts/resmokeconfig/suites/multiversion_auth.yml
+++ b/buildscripts/resmokeconfig/suites/multiversion_auth.yml
@@ -12,9 +12,6 @@ selector:
# Do not execute files with helper functions.
- jstests/multiVersion/libs/*.js
- # TODO: SERVER-21578
- - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
-
# TODO: SERVER-28104
- jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js
diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
index 13cc52a24cbbd..ccbc43594328d 100644
--- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml
@@ -74,17 +74,15 @@ selector:
# These tests create a new thread, so $tenant won't be properly injected.
- jstests/core/txns/transactions_block_ddl.js
- jstests/core/txns/write_conflicts_with_non_txns.js
+ - jstests/core/txns/kill_op_on_txn_expiry.js
# TODO SERVER-72357: cannot get the expected error due to an authorization contract issue.
- jstests/core/txns/multi_statement_transaction_command_args.js
- # TODO SERVER-72187: bulkWrite command does not support Tenant ID command
- - jstests/core/write/bulk/bulk_write_insert_cursor.js
- - jstests/core/write/bulk/bulk_write_update_cursor.js
- # TODO SERVER-73023 The tenantId is not attached to the namespace provided to failcommand
- # failpoint
- - jstests/core/failcommand_failpoint.js
# This test looks for the presence of a log line that contains a db name. Injecting a tenantId in
# the requests causes the test to fails due to a mismatch.
- jstests/core/api//apitest_db_profile_level.js
+ # Queryable encryption tests require an internal connection for the keyvault that does not
+ # inject a $tenant.
+ - jstests/core/queryable_encryption/**/*.js
executor:
archive:
@@ -99,7 +97,7 @@ executor:
<<: *authOptions
eval: |
jsTest.authenticate(db.getMongo());
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_dollar_tenant.js');
global_vars:
TestData: &TestData
@@ -117,19 +115,19 @@ executor:
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: CheckReplDBHash
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: ValidateCollections
shell_options:
global_vars:
TestData: *TestData
- eval: jsTest.authenticate(db.getMongo())
+ eval: jsTest.authenticate(db.getMongo())
<<: *authOptions
- class: CleanEveryN
n: 20
diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
index fa88a3527d5e8..aca599e2ccf48 100644
--- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
@@ -66,6 +66,9 @@ selector:
- jstests/core/**/list_catalog.js
# This test uses '_hashBSONElement' command that cannot be run with security token.
- jstests/core/**/index_key_expression.js
+ # Queryable encryption tests perform implicit encryption, which issues commands that don't
+ # include the security token.
+ - jstests/core/queryable_encryption/**/*.js
executor:
archive:
@@ -77,7 +80,7 @@ executor:
config:
shell_options:
eval: |
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_security_token.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
@@ -96,7 +99,7 @@ executor:
enableTestCommands: 1
multitenancySupport: true
featureFlagSecurityToken: true
- # TODO SERVER-74284: remove featureFlagRequireTenantID from the parameters and have the
+ # TODO SERVER-78300: remove featureFlagRequireTenantID from the parameters and have the
# inject_security_token override to be able to test both tenant-prefixed request and non-tenant-prefixed request.
# Currently, we only test non-tenant-prefixed request and enable the featureFlagRequireTenantID
# to have mongod return non-tenant-prefixed response too.
diff --git a/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml b/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml
new file mode 100644
index 0000000000000..fa101e6cfcc9d
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml
@@ -0,0 +1,7 @@
+test_kind: pretty_printer_test
+
+selector:
+ root: build/pretty_printer_tests.txt
+
+executor:
+ config: {}
diff --git a/buildscripts/resmokeconfig/suites/query_golden_classic.yml b/buildscripts/resmokeconfig/suites/query_golden_classic.yml
index 44915921403df..a3c72c852c03a 100644
--- a/buildscripts/resmokeconfig/suites/query_golden_classic.yml
+++ b/buildscripts/resmokeconfig/suites/query_golden_classic.yml
@@ -18,7 +18,7 @@ executor:
eval: |
// Keep in sync with query_golden_cqf.yml.
load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
- load("jstests/libs/golden_test.js");
+ await import("jstests/libs/override_methods/golden_overrides.js");
_openGoldenData(jsTestName(), {relativePath: "jstests/query_golden/expected_output"});
hooks:
- class: ValidateCollections
diff --git a/buildscripts/resmokeconfig/suites/query_golden_cqf.yml b/buildscripts/resmokeconfig/suites/query_golden_cqf.yml
index 578f720f3da36..3418d0d70759b 100644
--- a/buildscripts/resmokeconfig/suites/query_golden_cqf.yml
+++ b/buildscripts/resmokeconfig/suites/query_golden_cqf.yml
@@ -17,7 +17,7 @@ executor:
// Keep in sync with query_golden_classic.yml.
load("jstests/libs/override_methods/detect_spawning_own_mongod.js");
load("jstests/libs/set_force_bonsai.js");
- load("jstests/libs/golden_test.js");
+ await import("jstests/libs/override_methods/golden_overrides.js");
_openGoldenData(jsTestName(), {relativePath: "jstests/query_golden/expected_output"});
hooks:
- class: ValidateCollections
@@ -34,4 +34,5 @@ executor:
set_parameters:
enableTestCommands: 1
featureFlagCommonQueryFramework: true
+ internalQueryCardinalityEstimatorMode: "sampling"
internalQueryFrameworkControl: "forceBonsai"
diff --git a/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml b/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml
new file mode 100644
index 0000000000000..088a46966e450
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml
@@ -0,0 +1,41 @@
+test_kind: js_test
+description: |
+ This suite enables the collection of query stats metrics on a mongod server, then runs the tests in
+ core and aggregation as normal. This should cause each query or aggregation to compute a query
+ shape and query stats key, and record in-memory some metrics like execution time and number of
+ scanned documents. Then it uses the 'RunQueryStats' hook to have a background thread ask to collect
+ the query stats every one second. It doesn't assert anything about the collected query stats, it is
+ just meant to make sure nothing is going seriously awry (e.g. crashing).
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ # - jstests/aggregation/**/*.js # TODO: SERVER-75596 enable aggregation tests in the full passthrough.
+ exclude_files:
+ # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests.
+ - jstests/core/txns/**/*.js
+ - jstests/core/views/invalid_system_views.js # TODO SERVER-78025 reenable coverage on this test
+ # Queryable encryption is not supported on standalone
+ - jstests/core/queryable_encryption/**/*.js
+ exclude_with_any_tags:
+ # Running $queryStats will increment these counters which can screw up some test assertions.
+ - inspects_command_opcounters
+
+executor:
+ archive:
+ hooks:
+ - ValidateCollections
+ hooks:
+ # Be sure to run the hooks which depend on the fixture being alive before the CleanEveryN hook.
+ # That way the fixture restart can't cause any trouble for the other hooks.
+ - class: RunQueryStats
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ internalQueryStatsRateLimit: -1
+ internalQueryStatsErrorsAreCommandFatal: true
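
Note (not part of the patch): the new query_stats_passthrough suite's own comments indicate that query stats are gathered via the $queryStats aggregation stage on a periodic basis by the 'RunQueryStats' hook. A minimal, purely illustrative sketch of that collection step, assuming a legacy mongo shell connected to the suite's mongod fixture (the hook's real implementation lives in resmoke and may differ), would be:

    // Illustrative only: manually request the accumulated query stats from the admin
    // database, roughly the operation a once-per-second collection hook would perform.
    db.getSiblingDB("admin").aggregate([{$queryStats: {}}]).forEach(printjson);
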
diff --git a/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml b/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml
new file mode 100644
index 0000000000000..331c26145f612
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml
@@ -0,0 +1,28 @@
+# TODO: SERVER-75596 delete this suite and run aggregation through RunQueryStats hook as well.
+test_kind: js_test
+description: |
+ This suite enables the collection of query stats metrics on a mongod server, then runs the tests in
+ aggregation as normal. This should cause each query or aggregation to compute a query
+ shape and query stats key, and record some in-memory metrics such as execution time and number of
+ scanned documents.
+
+selector:
+ roots:
+ - jstests/aggregation/**/*.js
+
+executor:
+ archive:
+ hooks:
+ - ValidateCollections
+ hooks:
+ # Be sure to run the hooks which depend on the fixture being alive before the CleanEveryN hook.
+ # That way the fixture restart can't cause any trouble for the other hooks.
+ - class: ValidateCollections
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ internalQueryStatsRateLimit: -1
diff --git a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
index c84deae1fa223..e0fd4bc2b3b8c 100644
--- a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml
@@ -41,7 +41,7 @@ executor:
TestData:
defaultReadConcernLevel: linearizable
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
diff --git a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
index 78cd645be67d0..a7a95d5b8a998 100644
--- a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml
@@ -36,7 +36,7 @@ executor:
TestData:
defaultReadConcernLevel: majority
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml
index 3809405d3f781..d81a69346e9df 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml
@@ -26,7 +26,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_api_version.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml
index 51be09c6f421b..4025689dad977 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml
@@ -48,7 +48,7 @@ executor:
- BackgroundInitialSync
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
- class: BackgroundInitialSync
n: *run_hook_interval
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
index 9ae8f19f6f750..3fbf8730a3059 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml
@@ -47,7 +47,7 @@ executor:
- BackgroundInitialSync
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
- class: BackgroundInitialSync
n: *run_hook_interval
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
index 35fd9456b932b..fee8c8cc34bbc 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml
@@ -21,7 +21,7 @@ executor:
- IntermediateInitialSync
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
- class: IntermediateInitialSync
n: *run_hook_interval
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
index 9bcd88b16730f..5ffdf8251cdc3 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml
@@ -31,7 +31,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
index 6df62268b9f05..2a70d88d335be 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
@@ -37,7 +37,7 @@ executor:
- PeriodicKillSecondaries
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
- class: PeriodicKillSecondaries
fixture:
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml
index 9fb34b66fc61e..56aa1a77cb9c1 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml
@@ -23,7 +23,7 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
index 47406b42bc388..ef35798901c43 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml
@@ -245,7 +245,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
load('jstests/libs/override_methods/network_error_and_txn_override.js');
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
index 53b8d8e3de771..0237313a7f2e3 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml
@@ -277,8 +277,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
## The next tag corresponds to long running-operations, as they may exhaust their number
# of retries and result in a network error being thrown.
- operations_longer_than_stepdown_interval
@@ -311,9 +309,9 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
index bb59e2d0b099c..b3820ce194300 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml
@@ -291,9 +291,9 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
index 6233179e62715..c25433031f946 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml
@@ -300,9 +300,9 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml
index 9baa473f3e46d..df3e2048a8cdf 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml
@@ -46,7 +46,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/enable_sessions.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml
index 8fd0180ba922b..58299ecb3b17b 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml
@@ -116,7 +116,6 @@ selector:
# collStats is not causally consistent
- requires_collstats
- requires_dbstats
- - requires_datasize
- requires_sharding
# Operations in the main test shell aren't guaranteed to be causally consistent with operations
# performed earlier in a parallel shell if multiple nodes are electable because the latest
@@ -133,9 +132,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml
index 3df5d47056154..e1ed7ace28304 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml
@@ -106,9 +106,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js");
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml b/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml
index 8097a74ffc360..2123fbdecd2ac 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml
@@ -101,8 +101,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
## The next tag corresponds to long-running operations, as they may exhaust their number
# of retries and result in a network error being thrown.
- operations_longer_than_stepdown_interval
@@ -125,9 +123,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
index 35caa128159f8..711e4b0b28e28 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml
@@ -56,7 +56,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/retry_writes_at_least_once.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
index d6b9a27e561a6..f941f415095bc 100644
--- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml
@@ -105,9 +105,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
index 03553d7dfd1f7..dea2b7851fcb6 100644
--- a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml
@@ -57,7 +57,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_preference_secondary.js');
load('jstests/libs/override_methods/enable_causal_consistency.js');
hooks:
diff --git a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
index f9d0fa83f26eb..908cc2be5d759 100644
--- a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml
@@ -7,6 +7,9 @@ selector:
# Transactions are not supported on MongoDB standalone nodes.
- jstests/core/txns/**/*.js
+ # Queryable encryption is not supported on standalone nodes
+ - jstests/core/queryable_encryption/**/*.js
+
# These tests run commands using legacy queries, which are not supported on sessions.
- jstests/core/**/comment_field.js
- jstests/core/**/exhaust.js
diff --git a/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml
index cdcd52a7d6e9d..5dd408cac638e 100644
--- a/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml
@@ -264,7 +264,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -331,9 +331,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
minSnapshotHistoryWindowInSeconds: 30
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
diff --git a/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml
index 9153cafd46432..f810b70efba24 100644
--- a/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml
@@ -53,7 +53,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
jsTest.authenticate(db.getMongo());
global_vars:
@@ -111,9 +111,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
minSnapshotHistoryWindowInSeconds: 30
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
diff --git a/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml
index 3e828cd582960..4e3b5ce567233 100644
--- a/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml
@@ -264,7 +264,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -328,9 +328,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
minSnapshotHistoryWindowInSeconds: 30
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
diff --git a/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml
index 6b5af0d852954..fc4e960736d47 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml
@@ -119,7 +119,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_causal_consistency.js');
load('jstests/libs/override_methods/enable_sessions.js');
diff --git a/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml
index bbf4f369ee437..20aaa9f37ec7b 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml
@@ -55,7 +55,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
jsTest.authenticate(db.getMongo());
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml
index b25bda49b404d..e528effec0186 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml
@@ -97,8 +97,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
# Due to background shard splits, operations in the main test shell are not guaranteed to
# be causally consistent with operations in a parallel shell. The reason is that
# the TenantMigrationCommitted error is only thrown when the client does a write or an atClusterTime/
@@ -135,9 +133,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -180,7 +178,7 @@ executor:
- class: ContinuousShardSplit
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml
index f6a94169ea9f4..5bc0bc14f88cc 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml
@@ -259,7 +259,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
diff --git a/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml
index eef468bda3e48..0b33f27d51474 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml
@@ -127,9 +127,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -169,7 +169,7 @@ executor:
- class: ContinuousShardSplit
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml
index 8b4e5132be92e..f93aa05bf2298 100644
--- a/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml
@@ -127,9 +127,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -170,7 +170,7 @@ executor:
- class: ContinuousShardSplit
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
index ddcbd2c22bfcb..54b941b97392f 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml
@@ -91,6 +91,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml
index ba6868bdc0753..67dea09960b86 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml
@@ -41,12 +41,13 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml
index fb183730ac584..233d8e6a2a6eb 100644
--- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml
@@ -93,13 +93,14 @@ executor:
defaultReadConcernLevel: snapshot
disallowSnapshotDistinct: true
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load("jstests/libs/override_methods/enable_causal_consistency.js");
load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
hooks:
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml
index 0d90f77f3e9fd..cf1fa432b3e66 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml
@@ -55,13 +55,14 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js');
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
index a3bfb2adea7bb..c19a1bb33f8b0 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml
@@ -94,6 +94,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml
deleted file mode 100644
index 11c8f5e1a83ef..0000000000000
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml
+++ /dev/null
@@ -1,109 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests run in the jscore_txn passthrough suites.
- - jstests/core/txns/**/*.js
-
- # The following tests fail because a certain command or functionality is not supported by
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
- - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
- - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
- - jstests/core/**/tailable_skip_limit.js # capped collections.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- # The following tests fail because they count indexes. These counts do not take into account the
- # additional hashed shard key indexes that are automatically added by this passthrough.
- - jstests/core/**/apitest_dbcollection.js
- - jstests/core/**/bad_index_plugin.js
- - jstests/core/**/create_indexes.js
- - jstests/core/**/list_indexes_non_existent_ns.js
- - jstests/core/**/mr_preserve_indexes.js
- # The following tests fail because they expect no databases to be created. However a DB is created
- # automatically when we shard a collection.
- - jstests/core/**/dbcase.js
- - jstests/core/**/dbcase2.js
- - jstests/core/**/no_db_created.js
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- # These tests fail because sharded clusters do not clean up correctly after failed index builds.
- # See SERVER-33207 as an example.
- - jstests/core/**/geo_borders.js
- # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
- # queries with a limit or for distinct commands.
- - jstests/core/**/distinct_index1.js
- - jstests/core/**/explain1.js
- - jstests/core/**/explain4.js
- - jstests/core/**/sortk.js
- # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
- # incorrect on sharded collections.
- - jstests/core/**/explain_count.js
- - jstests/core/**/explain_server_params.js
- # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
- - jstests/core/**/expr_index_use.js
- - jstests/core/**/index_multikey.js
- - jstests/core/**/optimized_match_explain.js
- - jstests/core/**/sort_array.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # Tests tagged with the following will fail because they assume collections are not sharded.
- - assumes_no_implicit_collection_creation_after_drop
- - assumes_no_implicit_index_creation
- - assumes_unsharded_collection
- - cannot_create_unique_index_when_using_hashed_shard_key
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
- - catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - ValidateCollections
- config:
- shell_options:
- eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
- hooks:
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- catalog_shard: "any"
- num_shards: 2
- enable_balancer: false
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- mongod_options:
- set_parameters:
- enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml
new file mode 100644
index 0000000000000..4ff75632b9b45
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml
@@ -0,0 +1,109 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/fle2/**/*.js
+ - src/mongo/db/modules/*/jstests/fle2/**/*.js
+ exclude_files:
+ # These tests run in the jscore_txn passthrough suites.
+ - jstests/core/txns/**/*.js
+
+ # The following tests fail because a certain command or functionality is not supported by
+ # mongos. This command or functionality is placed in a comment next to the failing test.
+ - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
+ - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
+ - jstests/core/**/check_shard_index.js # checkShardingIndex.
+ - jstests/core/**/collection_truncate.js # emptycapped.
+ - jstests/core/**/compact_keeps_indexes.js # compact.
+ - jstests/core/**/currentop.js # uses fsync.
+ - jstests/core/**/dbhash.js # dbhash.
+ - jstests/core/**/dbhash2.js # dbhash.
+ - jstests/core/**/fsync.js # uses fsync.
+ - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
+ - jstests/core/**/geo_update_btree2.js # notablescan.
+ - jstests/core/**/index9.js # "local" database.
+ - jstests/core/**/queryoptimizera.js # "local" database.
+ - jstests/core/**/stages*.js # stageDebug.
+ - jstests/core/**/startup_log.js # "local" database.
+ - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
+ - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
+ - jstests/core/**/tailable_skip_limit.js # capped collections.
+ - jstests/core/**/top.js # top.
+ # The following tests fail because mongos behaves differently from mongod when testing certain
+ # functionality. The differences are in a comment next to the failing test.
+ - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
+ - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
+ - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
+ - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
+ # The following tests fail because they count indexes. These counts do not take into account the
+ # additional hashed shard key indexes that are automatically added by this passthrough.
+ - jstests/core/**/apitest_dbcollection.js
+ - jstests/core/**/bad_index_plugin.js
+ - jstests/core/**/create_indexes.js
+ - jstests/core/**/list_indexes_non_existent_ns.js
+ - jstests/core/**/mr_preserve_indexes.js
+ # The following tests fail because they expect no databases to be created. However, a DB is created
+ # automatically when we shard a collection.
+ - jstests/core/**/dbcase.js
+ - jstests/core/**/dbcase2.js
+ - jstests/core/**/no_db_created.js
+ - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
+ # These tests fail because sharded clusters do not clean up correctly after failed index builds.
+ # See SERVER-33207 as an example.
+ - jstests/core/**/geo_borders.js
+ # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
+ # queries with a limit or for distinct commands.
+ - jstests/core/**/distinct_index1.js
+ - jstests/core/**/explain1.js
+ - jstests/core/**/explain4.js
+ - jstests/core/**/sortk.js
+ # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
+ # incorrect on sharded collections.
+ - jstests/core/**/explain_count.js
+ - jstests/core/**/explain_server_params.js
+ # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
+ - jstests/core/**/expr_index_use.js
+ - jstests/core/**/index_multikey.js
+ - jstests/core/**/optimized_match_explain.js
+ - jstests/core/**/sort_array.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ - assumes_against_mongod_not_mongos
+ # Tests tagged with the following will fail because they assume collections are not sharded.
+ - assumes_no_implicit_collection_creation_after_drop
+ - assumes_no_implicit_index_creation
+ - assumes_unsharded_collection
+ - cannot_create_unique_index_when_using_hashed_shard_key
+ # system.profile collection doesn't exist on mongos.
+ - requires_profiling
+ - config_shard_incompatible
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
+ config:
+ shell_options:
+ eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CheckOrphansDeleted
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ config_shard: "any"
+ num_shards: 2
+ enable_balancer: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml
new file mode 100644
index 0000000000000..65a5f1d9f194d
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml
@@ -0,0 +1,321 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ exclude_files:
+ # The following tests fail because a certain command or functionality is not supported by mongos.
+ # This command or functionality is placed in a comment next to the failing test.
+ - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/**/awaitdata_getmore_cmd.js # capped collections.
+ - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted
+ - jstests/core/**/check_shard_index.js # checkShardingIndex.
+ - jstests/core/**/collection_truncate.js # emptycapped.
+ - jstests/core/**/compact_keeps_indexes.js # compact.
+ - jstests/core/**/currentop.js # uses fsync.
+ - jstests/core/**/dbhash.js # dbhash.
+ - jstests/core/**/dbhash2.js # dbhash.
+ - jstests/core/**/fsync.js # uses fsync.
+ - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
+ - jstests/core/**/geo_update_btree2.js # notablescan.
+ - jstests/core/**/index9.js # "local" database.
+ - jstests/core/**/queryoptimizera.js # "local" database.
+ - jstests/core/**/stages*.js # stageDebug.
+ - jstests/core/**/startup_log.js # "local" database.
+ - jstests/core/**/tailable_cursor_invalidation.js # capped collections.
+ - jstests/core/**/tailable_getmore_batch_size.js # capped collections.
+ - jstests/core/**/tailable_skip_limit.js # capped collections.
+ - jstests/core/**/top.js # top.
+ - jstests/core/**/index_many.js # renameCollection.
+ - jstests/core/**/fts_index2.js # renameCollection.
+ - jstests/core/**/list_indexes_invalidation.js # renameCollection.
+ - jstests/core/**/long_index_rename.js # renameCollection.
+ # The following tests fail because mongos behaves differently from mongod when testing certain
+ # functionality. The differences are in a comment next to the failing test.
+ - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047.
+ - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
+ - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
+ - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
+
+ # The following tests fail because they count indexes. These counts do not take into account the
+ # additional hashed shard key indexes that are automatically added by this passthrough.
+ - jstests/core/**/apitest_dbcollection.js
+ - jstests/core/**/bad_index_plugin.js
+ - jstests/core/**/create_indexes.js
+ - jstests/core/**/list_indexes_non_existent_ns.js
+ - jstests/core/**/mr_preserve_indexes.js
+
+ # The following tests fail because they expect no databases to be created. However, a DB is created
+ # automatically when we shard a collection.
+ - jstests/core/**/dbcase.js
+ - jstests/core/**/dbcase2.js
+ - jstests/core/**/no_db_created.js
+ - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
+ # These tests fail because sharded clusters do not clean up correctly after failed index builds.
+ # See SERVER-33207 as an example.
+ - jstests/core/**/geo_borders.js
+
+ # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded
+ # queries with a limit or for distinct commands.
+ - jstests/core/**/distinct_index1.js
+ - jstests/core/**/explain1.js
+ - jstests/core/**/explain4.js
+ - jstests/core/**/sortk.js
+
+ # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is
+ # incorrect on sharded collections.
+ - jstests/core/**/explain_count.js
+ - jstests/core/**/explain_server_params.js
+
+ # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output.
+ - jstests/core/**/expr_index_use.js
+ - jstests/core/**/index_multikey.js
+ - jstests/core/**/optimized_match_explain.js
+ - jstests/core/**/sort_array.js
+
+ # These tests create secondary unique: true indexes without the shard key prefix.
+ - jstests/core/**/batch_write_command_update.js
+ - jstests/core/**/batch_write_command_w0.js
+ - jstests/core/**/bulk_api_unordered.js
+ - jstests/core/**/bulk_api_ordered.js
+ - jstests/core/**/compound_index_max_fields.js
+
+ # Test assertions expect that a certain document is deleted, whereas updateOnes, deleteOnes, and
+ # findAndModify without shard key can pick and modify any matching document.
+ - jstests/core/**/crud_api.js
+
+ # Aggregation does not support $where.
+ - jstests/core/**/find_and_modify_concurrent_update.js
+ - jstests/core/**/find_and_modify_where.js
+
+ # {multi: true} upsert requires specifying the full shard key.
+ - jstests/core/**/update_multi_halts.js
+ - jstests/core/**/updatei.js
+ - jstests/core/**/server1470.js
+
+ # {multi: true} update testing behavior is not applicable to sharded clusters.
+ - jstests/core/**/updatej.js
+ - jstests/core/**/write_result.js
+
+ # Expects to validate that the findandmodify alias is not valid in the stable API; the mongos
+ # implementation of findAndModify does not support that currently.
+ - jstests/core/**/api_version_find_and_modify.js
+
+ # Capped collections cannot be sharded.
+ - jstests/core/**/capped*.js
+ - jstests/core/**/rename_collection_capped.js
+
+ # Queries on a sharded collection cannot be covered when they aren't on the shard key
+ # since the document needs to be fetched in order to apply the SHARDING_FILTER stage.
+ - jstests/core/**/coveredIndex1.js
+ - jstests/core/**/coveredIndex2.js
+ - jstests/core/**/covered_index_compound_1.js
+ - jstests/core/**/covered_index_simple_1.js
+ - jstests/core/**/covered_index_simple_2.js
+ - jstests/core/**/covered_index_simple_3.js
+ - jstests/core/**/covered_index_sort_1.js
+ - jstests/core/**/covered_index_sort_3.js
+ - jstests/core/**/covered_index_sort_no_fetch_optimization.js
+ - jstests/core/**/covered_query_with_sort.js
+ - jstests/core/**/return_key.js
+
+ # $near, $nearSphere are not supported in aggregate (which underlies the two-phase write
+ # protocol).
+ - jstests/core/**/geo_update.js
+ - jstests/core/**/geo_update_dedup.js
+
+ # These tests assert on query plans expected from unsharded collections.
+ - jstests/core/**/hashed_index_collation.js
+ - jstests/core/**/hashed_index_covered_queries.js
+ - jstests/core/**/hashed_index_sort.js
+ - jstests/core/**/index_bounds_code.js
+ - jstests/core/**/index_bounds_maxkey.js
+ - jstests/core/**/index_bounds_minkey.js
+ - jstests/core/**/index_check6.js
+ - jstests/core/**/index_decimal.js
+ - jstests/core/**/index_filter_commands_invalidate_plan_cache_entries.js
+ - jstests/core/**/wildcard_index_collation.js
+ - jstests/core/**/wildcard_index_count.js
+ - jstests/core/**/wildcard_index_covered_queries.js
+ - jstests/core/**/covered_multikey.js
+ - jstests/core/**/distinct_multikey_dotted_path.js
+ - jstests/core/**/distinct_with_hashed_index.js
+ - jstests/core/**/explain_multikey.js
+ - jstests/core/**/explain_plan_scores.js
+ - jstests/core/**/explain_shell_helpers.js
+ - jstests/core/**/explain_sort_type.js
+ - jstests/core/**/explain_winning_plan.js
+ - jstests/core/**/find_covered_projection.js
+ - jstests/core/**/or_to_in.js
+ - jstests/core/**/partial_index_logical.js
+ - jstests/core/**/cached_plan_trial_does_not_discard_work.js
+ - jstests/core/**/collation_plan_cache.js
+ - jstests/core/**/plan_cache*.js
+ - jstests/core/**/projection_dotted_paths.js
+ - jstests/core/**/regex6.js
+ - jstests/core/**/sbe_plan_cache_autoparameterize_ixscan.js
+ - jstests/core/**/index_bounds_object.js
+ - jstests/core/**/column_scan_skip_row_store_projection.js
+ - jstests/core/**/sbe_explain_rejected_plans.js
+ - jstests/core/**/sbe_plan_cache_autoparameterize_collscan.js
+ - jstests/core/**/sparse_index_supports_ne_null.js
+ - jstests/core/**/update_hint.js
+ - jstests/core/**/delete_hint.js
+ - jstests/core/**/find_and_modify_hint.js
+ - jstests/core/**/index_stats.js
+ - jstests/core/**/index_partial_read_ops.js
+ - jstests/core/**/explain_upsert.js
+ - jstests/core/**/explain_multi_plan.js
+
+ # Test not applicable for sharded collections.
+ - jstests/core/**/add_skip_stage_before_fetch.js
+
+ # Aggregation pipeline does not support the use of sharded collections as the output collection.
+ - jstests/core/**/explain_agg_write_concern.js
+
+ # Can't shard a collection with an invalid dbName.
+ - jstests/core/**/invalid_db_name.js
+
+ # Cannot output to a non-sharded collection because a sharded collection already exists.
+ - jstests/core/**/mr_bigobject_replace.js
+ - jstests/core/**/mr_merge.js
+ - jstests/core/**/mr_reduce.js
+
+ # Cannot implicitly shard accessed collections because mapReduce cannot replace a sharded
+ # collection as output.
+ - jstests/core/**/mr_compute_avg.js
+ - jstests/core/**/mr_replace_into_other_db.js
+
+ # Cannot implicitly shard accessed collections because the "limit" option to the "mapReduce"
+ # command cannot be used on a sharded collection.
+ - jstests/core/**/mr_sort.js
+
+ # These tests expect a function stored in the system.js collection to be available for an operation,
+ # which may not be the case if that collection is implicitly sharded in a passthrough.
+ - jstests/core/**/mr_stored.js
+ - jstests/core/**/where_system_js.js
+ - jstests/core/**/system_js_access.js
+ - jstests/core/**/system_js_drop.js
+
+ # Test expects failure, but the two-phase write protocol exits early with OK status if there are no
+ # matching documents.
+ - jstests/core/**/rename_operator.js
+ - jstests/core/**/field_name_validation.js
+
+ # Operation is not supported on a view.
+ - jstests/core/views/**/*.js
+
+ # Operation not supported in a transaction.
+ - jstests/core/**/create_collection_not_blocked_by_txn.js
+ - jstests/core/**/drop_collection_not_blocked_by_txn.js
+ - jstests/core/**/indexing_not_blocked_by_txn.js
+ - jstests/core/**/listcollections_autocomplete.js
+ - jstests/core/**/rename_collection_not_blocked_by_txn.js
+
+ # $natural not supported in $sort for aggregation pipelines.
+ - jstests/core/**/natural_validation.js
+
+ # Test expects no index to be created, but shardCollection implicitly creates one.
+ - jstests/core/**/timeseries_id_range.js
+
+ # Test relies on keeping the test collection unsharded.
+ - jstests/core/**/command_let_variables_merge_only.js
+ - jstests/core/**/illegal_cmd_namespace.js
+
+ # Cannot implicitly shard accessed collections because the error response from the shard about
+ # using the empty string as the out collection name is converted to an error and no longer retains
+ # the "code" property.
+ - jstests/core/**/commands_namespace_parsing.js
+
+ # Cannot implicitly shard accessed collections because the "dataSize" command returns a
+ # "keyPattern must equal shard key" error response.
+ - jstests/core/**/datasize2.js
+ - jstests/core/**/datasize_validation.js
+
+ # Cannot implicitly shard accessed collections because of the following error: GridFS fs.chunks
+ # collection must be sharded on either {files_id:1} or {files_id:1, n:1}
+ - jstests/core/**/filemd5.js
+
+ # This test assumes that timestamps inserted within the same second will have increasing increment
+ # values, which may not be the case if the inserts are into a sharded collection.
+ - jstests/core/**/ts1.js
+
+ # Cannot implicitly shard accessed collections because the "splitVector" command cannot be run
+ # on a sharded collection
+ - jstests/core/**/splitvector.js
+
+ # Profile can only be run against the admin database on mongos.
+ - jstests/core/txns/transactions_profiling.js
+ - jstests/core/txns/transactions_profiling_with_drops.js
+
+ # Implicitly creates a database through a collection rename, which does not work in a sharded
+ # cluster.
+ - jstests/core/txns/transactions_block_ddl.js
+
+ # These tests set the transactionLifetimeLimitSeconds parameter, which is not available on mongos.
+ - jstests/core/txns/abort_expired_transaction.js
+ - jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
+ - jstests/core/txns/kill_op_on_txn_expiry.js
+
+ # Uses the hangAfterCollectionInserts failpoint, which is not available on mongos.
+ - jstests/core/txns/speculative_snapshot_includes_all_writes.js
+
+ # View tests aren't expected to work when collections are implicitly sharded.
+ - jstests/core/txns/view_reads_in_transaction.js
+
+ # Does not use the transactions shell helpers so afterClusterTime read concern is incorrectly
+ # attached to statements in a transaction beyond the first one.
+ - jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
+
+ # These workloads explicitly create collections inside multi-document transactions. These are
+ # non-idempotent operations, and the implicit collection sharding logic upon collection access
+ # results in premature collection creation, causing the workloads to fail.
+ - jstests/core/txns/create_collection.js
+ - jstests/core/txns/create_collection_parallel.js
+ - jstests/core/txns/create_indexes.js
+ - jstests/core/txns/create_indexes_parallel.js
+ - jstests/core/txns/commands_in_txns_read_concern.js
+
+ exclude_with_any_tags:
+ - assumes_unsharded_collection
+ - assumes_standalone_mongod
+ - assumes_against_mongod_not_mongos
+ # Tests tagged with the following will fail because they assume collections are not sharded.
+ - assumes_no_implicit_collection_creation_after_drop
+ - assumes_no_implicit_index_creation
+ - cannot_create_unique_index_when_using_hashed_shard_key
+ # system.profile collection doesn't exist on mongos.
+ - requires_profiling
+ # Transactions are not allowed to operate on capped collections.
+ - requires_capped
+ # Prepare is not a command on mongos.
+ - uses_prepare_transaction
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - CheckMetadataConsistencyInBackground
+ - ValidateCollections
+ config:
+ shell_options:
+ eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js")
+ hooks:
+ - class: CheckReplDBHash
+ - class: CheckMetadataConsistencyInBackground
+ - class: ValidateCollections
+ - class: CheckOrphansDeleted
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ num_shards: 2
+ enable_balancer: false
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml
index af1def051dd37..20b294caa4c35 100644
--- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml
@@ -35,11 +35,12 @@ executor:
- ValidateCollections
config:
shell_options:
- eval: "testingReplication = true;"
+ eval: "globalThis.testingReplication = true;"
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml
index 3e2fc25d61e5e..440cf2c06543a 100644
--- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml
@@ -50,12 +50,13 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');
hooks:
# We don't execute dbHash or oplog consistency checks since there is only a single replica set
# node.
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
index 1d62448b10db7..23860c730f8cb 100644
--- a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml
@@ -269,7 +269,7 @@ executor:
config:
shell_options:
eval: >-
- var testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -290,6 +290,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml b/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml
index daa760ca855ed..fcf6a1b40d79c 100644
--- a/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml
@@ -3,8 +3,6 @@ test_kind: js_test
selector:
roots:
- jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
exclude_files:
# Transactions do not support retryability of individual operations.
# TODO: Remove this once it is supported (SERVER-33952).
@@ -74,6 +72,9 @@ selector:
# TODO SERVER-61050
- jstests/core/timeseries/timeseries_merge.js
+ # Explain doesn't support read concern majority in sharding.
+ - jstests/core/**/command_let_variables.js
+
exclude_with_any_tags:
- assumes_against_mongod_not_mongos
- assumes_standalone_mongod
@@ -129,8 +130,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
## The next tag corresponds to long-running operations, as they may exhaust their number
# of retries and result in a network error being thrown.
- operations_longer_than_stepdown_interval
@@ -154,9 +153,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
@@ -189,6 +188,7 @@ executor:
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml
index ca81b50ad6cf7..1154afc753e8d 100644
--- a/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml
@@ -36,6 +36,6 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = false;
+ globalThis.testingReplication = false;
load('jstests/libs/override_methods/set_api_strict.js');
nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml
index 300a779e6ff07..d5a06c7eb1e4e 100644
--- a/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml
@@ -87,7 +87,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_api_version.js');
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
@@ -97,6 +97,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth.yml b/buildscripts/resmokeconfig/suites/sharding_auth.yml
index 2b2862e5a8e48..26d78a585f79b 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth.yml
@@ -32,6 +32,7 @@ selector:
- jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713
# Runs with auth enabled.
- jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+ - jstests/sharding/cluster_time_across_add_shard.js
# Skip because this suite implicitly authenticates as __system, which allows bypassing user write
# blocking.
- jstests/sharding/set_user_write_block_mode.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml
deleted file mode 100644
index 459b2debd5a02..0000000000000
--- a/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Section that is ignored by resmoke.py.
-config_variables:
-- &keyFile jstests/libs/authTestsKey
-- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
-
-test_kind: js_test
-
-selector:
- roots:
- - jstests/sharding/**/*.js
- exclude_files:
- - jstests/sharding/**/libs/**/*.js
- # Skip any tests that run with auth explicitly.
- - jstests/sharding/*[aA]uth*.js
- - jstests/sharding/analyze_shard_key/*[aA]uth*.js
- - jstests/sharding/query/*[aA]uth*.js
- - jstests/sharding/change_streams/*[aA]uth*.js
-
- - jstests/sharding/advance_cluster_time_action_type.js
- - jstests/sharding/query/aggregation_currentop.js
- - jstests/sharding/internal_txns/internal_client_restrictions.js
- - jstests/sharding/kill_sessions.js
- # Skip these additional tests when running with auth enabled.
- - jstests/sharding/parallel.js
- # Skip the testcases that do not have auth bypass when running ops in parallel.
- - jstests/sharding/migration_ignore_interrupts_1.js # SERVER-21713
- - jstests/sharding/migration_ignore_interrupts_2.js # SERVER-21713
- - jstests/sharding/migration_server_status.js # SERVER-21713
- - jstests/sharding/migration_sets_fromMigrate_flag.js # SERVER-21713
- - jstests/sharding/migration_with_source_ops.js # SERVER-21713
- - jstests/sharding/movechunk_parallel.js # SERVER-21713
- - jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713
- # Runs with auth enabled.
- - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
- # Skip because this suite implicitly authenticates as __system, which allows bypassing user write
- # blocking.
- - jstests/sharding/set_user_write_block_mode.js
- exclude_with_any_tags:
- - catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
-
-executor:
- archive:
- tests:
- - jstests/sharding/*reshard*.js
- config:
- shell_options:
- global_vars:
- TestData:
- auth: true
- authMechanism: SCRAM-SHA-256
- catalogShard: true
- keyFile: *keyFile
- keyFileData: *keyFileData
- roleGraphInvalidationIsFatal: true
- # TODO (SERVER-74534): Enable the metadata consistency check when it will work with
- # co-located configsvr.
- skipCheckMetadataConsistency: true
- nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
deleted file mode 100644
index b988b4581fdf3..0000000000000
--- a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/sharding/**/*.js
- exclude_files:
- - jstests/sharding/**/libs/**/*.js
- exclude_with_any_tags:
- - catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
-
-executor:
- archive:
- tests:
- - jstests/sharding/*reshard*.js
- config:
- shell_options:
- global_vars:
- TestData:
- catalogShard: true
- # TODO (SERVER-74534): Enable the metadata consistency check when it will work with
- # co-located configsvr.
- skipCheckMetadataConsistency: true
- nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_config_shard.yml b/buildscripts/resmokeconfig/suites/sharding_config_shard.yml
new file mode 100644
index 0000000000000..c92994bc7d3f1
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_config_shard.yml
@@ -0,0 +1,20 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/sharding/**/*.js
+ exclude_files:
+ - jstests/sharding/**/libs/**/*.js
+ exclude_with_any_tags:
+ - config_shard_incompatible
+
+executor:
+ archive:
+ tests:
+ - jstests/sharding/*reshard*.js
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ configShard: true
+ nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
deleted file mode 100644
index a4468831cc0f6..0000000000000
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ /dev/null
@@ -1,260 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/sharding/**/*.js
- exclude_files:
- - jstests/sharding/**/libs/**/*.js
- # Skip any tests that run with auth explicitly.
- # Auth tests require authentication on the stepdown thread's connection
- - jstests/sharding/*[aA]uth*.js
- - jstests/sharding/analyze_shard_key/*[aA]uth*.js
- - jstests/sharding/query/*[aA]uth*.js
- - jstests/sharding/change_streams/*[aA]uth*.js
- - jstests/sharding/internal_txns/internal_client_restrictions.js
- - jstests/sharding/internal_txns/non_retryable_writes_during_migration.js
- - jstests/sharding/internal_txns/retry_on_transient_error_validation.js
- - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_oplog.js
- - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js
- - jstests/sharding/internal_txns/retryable_writes_aborted_during_migration.js
- - jstests/sharding/internal_txns/retryable_writes_committed_during_migration.js
-
- - jstests/sharding/localhostAuthBypass.js
- - jstests/sharding/kill_sessions.js
- - jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
- - jstests/sharding/query/mrShardedOutputAuth.js
- - jstests/sharding/query/aggregation_currentop.js
- - jstests/sharding/advance_cluster_time_action_type.js
- - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
- # Count/write/aggregate commands against the config shard do not support retries yet
- - jstests/sharding/addshard1.js
- - jstests/sharding/addshard2.js
- - jstests/sharding/basic_merge.js
- - jstests/sharding/count1.js
- - jstests/sharding/count2.js
- - jstests/sharding/query/current_op_with_drop_shard.js
- - jstests/sharding/cursor1.js
- - jstests/sharding/diffservers1.js
- - jstests/sharding/findandmodify1.js
- - jstests/sharding/query/geo_near_sharded.js
- - jstests/sharding/hash_basic.js
- - jstests/sharding/hash_shard1.js
- - jstests/sharding/hash_shard_non_empty.js
- - jstests/sharding/hash_shard_num_chunks.js
- - jstests/sharding/hash_single_shard.js
- - jstests/sharding/key_many.js
- - jstests/sharding/key_string.js
- - jstests/sharding/large_chunk.js
- - jstests/sharding/limit_push.js
- - jstests/sharding/merge_with_drop_shard.js
- - jstests/sharding/merge_with_move_primary.js
- - jstests/sharding/move_chunk_basic.js
- - jstests/sharding/movePrimary1.js
- - jstests/sharding/names.js
- - jstests/sharding/prefix_shard_key.js
- - jstests/sharding/query_config.js
- - jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js
- - jstests/sharding/remove1.js
- - jstests/sharding/rename_across_mongos.js
- - jstests/sharding/shard2.js
- - jstests/sharding/shard3.js
- - jstests/sharding/shard_collection_basic.js
- - jstests/sharding/tag_range.js
- - jstests/sharding/count_config_servers.js
- - jstests/sharding/split_large_key.js
- - jstests/sharding/balancer_window.js
- - jstests/sharding/zone_changes_compound.js
- - jstests/sharding/zone_changes_hashed.js
- - jstests/sharding/zone_changes_range.js
- # No retries on direct writes to the config/admin databases on the config servers
- - jstests/sharding/listDatabases.js
- - jstests/sharding/bulk_insert.js
- - jstests/sharding/printShardingStatus.js
- - jstests/sharding/refresh_sessions.js
- - jstests/sharding/shard_collection_existing_zones.js
- - jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js
- # Balancer writes (direct write to config database with no retries)
- - jstests/sharding/convert_to_and_from_sharded.js
- - jstests/sharding/remove2.js
- - jstests/sharding/features3.js
- - jstests/sharding/in_memory_sort_limit.js
- - jstests/sharding/parallel.js
- - jstests/sharding/migrateBig.js
- - jstests/sharding/sharding_rs1.js
- - jstests/sharding/move_primary_fails_without_database_version.js
- # Calls the config server primary directly (not through mongos)
- - jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
- - jstests/sharding/analyze_shard_key/invalid_config_docs.js
- - jstests/sharding/analyze_shard_key/persist_sampled_diffs.js
- - jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js
- - jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js
- - jstests/sharding/analyze_shard_key/refresh_sample_rates.js
- - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
- - jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js
- - jstests/sharding/read_after_optime.js
- - jstests/sharding/server_status.js
- - jstests/sharding/drop_configdb.js
- - jstests/sharding/shard_identity_config_update.js
- - jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
- - jstests/sharding/key_rotation.js
- - jstests/sharding/keys_rotation_interval_sec.js
- - jstests/sharding/migration_coordinator_basic.js # sets a failpoint on the config primary
- - jstests/sharding/migration_coordinator_abort_failover.js # sets a failpoint on the config primary
- - jstests/sharding/migration_coordinator_commit_failover.js # sets a failpoint on the config primary
- - jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
- - jstests/sharding/move_chunk_insert_with_write_retryability.js
- - jstests/sharding/move_chunk_remove_with_write_retryability.js
- - jstests/sharding/move_chunk_update_with_write_retryability.js
- - jstests/sharding/refine_collection_shard_key_atomic.js # sets a failpoint on the config primary
- - jstests/sharding/restart_transactions.js
- - jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
- - jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
- - jstests/sharding/txn_two_phase_commit_failover.js
- - jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
- # Runs setShardVersion/getShardVersion against the config server and we don't support retries
- # for this command
- - jstests/sharding/major_version_check.js
- # Runs replSetGetStatus -- via awaitLastOpCommitted -- directly against the config server:
- # retries aren't supported.
- - jstests/sharding/coll_epoch_test1.js
- - jstests/sharding/move_stale_mongos.js
- - jstests/sharding/shard4.js
- - jstests/sharding/shard5.js
- - jstests/sharding/split_stale_mongos.js
- - jstests/sharding/stale_mongos_updates_and_removes.js
- - jstests/sharding/zero_shard_version.js
- # Already stop or blackholes the primary of the CSRS config shard
- - jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
- - jstests/sharding/all_config_servers_blackholed_from_mongos.js
- - jstests/sharding/batch_write_command_sharded.js
- - jstests/sharding/config_rs_no_primary.js
- - jstests/sharding/startup_with_all_configs_down.js
- - jstests/sharding/lagged_config_secondary.js
- - jstests/sharding/autodiscover_config_rs_from_secondary.js
- - jstests/sharding/rs_stepdown_and_pooling.js
- - jstests/sharding/mongos_no_replica_set_refresh.js
- - jstests/sharding/primary_config_server_blackholed_from_mongos.js
- - jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
- - jstests/sharding/health_monitor/config_server_health_observer_crash.js
- # Nothing is affected by config server step down
- - jstests/sharding/basic_sharding_params.js
- # ShardingTest is never used, so continuous step down thread never starts
- - jstests/sharding/config_rs_change.js
- - jstests/sharding/empty_cluster_init.js
- # Temporarily denylisted until more robust
- # Expects same secondaries for entire test
- - jstests/sharding/commands_that_write_accept_wc_configRS.js
- - jstests/sharding/commands_that_write_accept_wc_shards.js
- - jstests/sharding/move_chunk_wc.js
- # Expects that connections to all shards/config servers will never close
- - jstests/sharding/shard6.js
- # Stepping down the primary can make the balancer rerun a migration that was designed to fail
- # earlier, but can potentially pass or have different side effects on the second try
- - jstests/sharding/migration_ignore_interrupts_1.js
- - jstests/sharding/migration_sets_fromMigrate_flag.js
- - jstests/sharding/migration_waits_for_majority_commit.js
- # listCollections is not retryable
- - jstests/sharding/sessions_collection_auto_healing.js
- # shardCollection is not retryable
- - jstests/sharding/shard_collection_config_db.js
- # creates collection, does movePrimary, then shards the collection and moves a chunk to the old
- # primary (SERVER-31909)
- - jstests/sharding/mongos_validate_writes.js
- # Test expects a specific chunk distribution after shardCollection and it can be broken when
- # a step down occurs.
- - jstests/sharding/regex_targeting.js
- # Calls movePrimary after data has been inserted into an unsharded collection, so will fail if
- # a stepdown causes the command to be sent again.
- - jstests/sharding/move_primary_clone.js
- - jstests/sharding/mongos_validate_writes.js
- - jstests/sharding/movePrimary1.js
- # Asserts that the _flushDatabaseCacheUpdates at the end of _configsvrCreateDatabase is sent, but
- # it may not be sent if the config server primary steps down just before sending it.
- - jstests/sharding/database_versioning_all_commands.js
- # Calls removeShard/removeshard which is not idempotent and these tests expect it to be run an exact number of times
- - jstests/sharding/addshard5.js
- - jstests/sharding/auth_add_shard.js
- - jstests/sharding/remove3.js
- - jstests/sharding/authCommands.js
- # - jstests/sharding/addshard2.js
- # - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
- # - jstests/sharding/convert_to_and_from_sharded.js
- # - jstests/sharding/names.js
- # - jstests/sharding/remove1.js
- # - jstests/sharding/remove2.js
-
- # Moves a chunk before continuing a transaction, which can lead to snapshot errors if the
- # CSRS failovers are sufficiently slow.
- - jstests/sharding/transactions_reject_writes_for_moved_chunks.js
- - jstests/sharding/snapshot_reads_target_at_point_in_time.js
- # Tests that rely on shards becoming aware of collection drops regardless of config stepdowns.
- # (SERVER-34760)
- - jstests/sharding/merge_requires_unique_index.js
- - jstests/sharding/query/merge_stale_on_fields.js
- - jstests/sharding/query/out_fails_to_replace_sharded_collection.js
- # In this suite the cluster may end up in a state where each shard believes the
- # collection is sharded and the mongos believes it is unsharded. $merge is not
- # prepared to work correctly in this situation. This should be fixed by a future
- # improvement in sharding infrastructure, and can be undenylisted by SERVER-40172.
- - jstests/sharding/query/merge_to_existing.js
- # Sets a failpoint on find commands which can lead to a hang when a config steps down.
- - jstests/sharding/sharding_statistics_server_status.js
- # setShardVersion is not robust during config server stepdown.
- - jstests/sharding/mongos_no_detect_sharding.js
- # Runs commands directly on the config server primary and is not robust to the primary changing.
- - jstests/sharding/read_write_concern_defaults_commands_api.js
- - jstests/sharding/read_write_concern_defaults_propagation.js
- - jstests/sharding/live_shard_startup_recovery_config_server.js
- - jstests/sharding/live_shard_logical_initial_sync_config_server.js
-
- # On stepdown there is not gurantee that changelog entries have been inserted [SERVER-45655]
- - jstests/sharding/refine_collection_shard_key_changelog.js
- # This is expected to fail if the config server steps down during moveChunk.
- - jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
- - jstests/sharding/move_chunk_critical_section_non_internal_client_abort.js
- # Runs commands on mongos which target the primary configsvr, and then checks the outcome using
- # profiling/logging (on the configsvr primary), so cannot tolerate the configsvr primary changing.
- - jstests/sharding/read_write_concern_defaults_application.js
-
- # SERVER-48537 addShard is not idempotent for retries
- - jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js
- - jstests/sharding/move_primary_with_writes.js
-
- # Expects same CSRS primary and shard primary throughout the test
- - jstests/sharding/api_params_nontransaction_sharded.js
- - jstests/sharding/api_params_nontransaction_unsharded.js
- - jstests/sharding/api_params_transaction_sharded.js
- - jstests/sharding/api_params_transaction_unsharded.js
-
- # TODO SERVER-51495: Re-enable these tests after reshardCollection is resilient to config server
- # primary failovers.
- - jstests/sharding/*reshard*.js
-
- # SERVER-51805 splitChunk op is not idempotent
- - jstests/sharding/mongos_get_shard_version.js
-
- # Expects reshardCollection executes without config server stepdown
- - jstests/sharding/shard_encrypted_collection.js
-
- # Runs commands against mongos which target the config server primary and may fail with
- # FailedToSatisfyReadPreference when electing a new primary of the config server replica
- # set takes a while.
- - jstests/sharding/move_chunk_respects_maxtimems.js
-
- # TODO (SERVER-75863): Investigate the timeout issue for read_and_write_distribution.js in the
- # config stepdown suite
- - jstests/sharding/analyze_shard_key/read_and_write_distribution.js
-
- exclude_with_any_tags:
- - does_not_support_stepdowns
-
-executor:
- config:
- shell_options:
- global_vars:
- TestData:
- # TODO: SERVER-45994 remove
- skipCheckingCatalogCacheConsistencyWithShardingCatalog: true
- skipCheckOrphans: true
- eval: "load('jstests/libs/override_methods/sharding_continuous_config_stepdown.js');"
- nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml
new file mode 100644
index 0000000000000..b77b79ea6a455
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml
@@ -0,0 +1,257 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/sharding/**/*.js
+ exclude_files:
+ - jstests/sharding/**/libs/**/*.js
+ # Skip any tests that run with auth explicitly.
+ # Auth tests require authentication on the stepdown thread's connection
+ - jstests/sharding/*[aA]uth*.js
+ - jstests/sharding/analyze_shard_key/*[aA]uth*.js
+ - jstests/sharding/query/*[aA]uth*.js
+ - jstests/sharding/change_streams/*[aA]uth*.js
+ - jstests/sharding/cluster_time_across_add_shard.js
+
+ - jstests/sharding/internal_txns/internal_client_restrictions.js
+ - jstests/sharding/internal_txns/non_retryable_writes_during_migration.js
+ - jstests/sharding/internal_txns/retry_on_transient_error_validation.js
+ - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js
+ - jstests/sharding/internal_txns/retryable_writes_aborted_during_migration.js
+ - jstests/sharding/internal_txns/retryable_writes_committed_during_migration.js
+
+ - jstests/sharding/localhostAuthBypass.js
+ - jstests/sharding/kill_sessions.js
+ - jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+ - jstests/sharding/query/mrShardedOutputAuth.js
+ - jstests/sharding/query/aggregation_currentop.js
+ - jstests/sharding/advance_cluster_time_action_type.js
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+ # Count/write/aggregate commands against the config shard do not support retries yet
+ - jstests/sharding/addshard1.js
+ - jstests/sharding/addshard2.js
+ - jstests/sharding/basic_merge.js
+ - jstests/sharding/count1.js
+ - jstests/sharding/count2.js
+ - jstests/sharding/query/current_op_with_drop_shard.js
+ - jstests/sharding/cursor1.js
+ - jstests/sharding/diffservers1.js
+ - jstests/sharding/findandmodify1.js
+ - jstests/sharding/query/geo_near_sharded.js
+ - jstests/sharding/hash_basic.js
+ - jstests/sharding/hash_shard1.js
+ - jstests/sharding/hash_shard_non_empty.js
+ - jstests/sharding/hash_shard_num_chunks.js
+ - jstests/sharding/hash_single_shard.js
+ - jstests/sharding/key_many.js
+ - jstests/sharding/key_string.js
+ - jstests/sharding/large_chunk.js
+ - jstests/sharding/limit_push.js
+ - jstests/sharding/merge_with_drop_shard.js
+ - jstests/sharding/merge_with_move_primary.js
+ - jstests/sharding/move_chunk_basic.js
+ - jstests/sharding/movePrimary1.js
+ - jstests/sharding/names.js
+ - jstests/sharding/prefix_shard_key.js
+ - jstests/sharding/query_config.js
+ - jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js
+ - jstests/sharding/remove1.js
+ - jstests/sharding/rename_across_mongos.js
+ - jstests/sharding/shard2.js
+ - jstests/sharding/shard3.js
+ - jstests/sharding/shard_collection_basic.js
+ - jstests/sharding/tag_range.js
+ - jstests/sharding/count_config_servers.js
+ - jstests/sharding/split_large_key.js
+ - jstests/sharding/balancer_window.js
+ - jstests/sharding/zone_changes_compound.js
+ - jstests/sharding/zone_changes_hashed.js
+ - jstests/sharding/zone_changes_range.js
+ # No retries on direct writes to the config/admin databases on the config servers
+ - jstests/sharding/listDatabases.js
+ - jstests/sharding/bulk_insert.js
+ - jstests/sharding/printShardingStatus.js
+ - jstests/sharding/refresh_sessions.js
+ - jstests/sharding/shard_collection_existing_zones.js
+ - jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js
+ # Balancer writes (direct write to config database with no retries)
+ - jstests/sharding/convert_to_and_from_sharded.js
+ - jstests/sharding/remove2.js
+ - jstests/sharding/features3.js
+ - jstests/sharding/in_memory_sort_limit.js
+ - jstests/sharding/parallel.js
+ - jstests/sharding/migrateBig.js
+ - jstests/sharding/sharding_rs1.js
+ - jstests/sharding/move_primary_fails_without_database_version.js
+ # Calls the config server primary directly (not through mongos)
+ - jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
+ - jstests/sharding/analyze_shard_key/invalid_config_docs.js
+ - jstests/sharding/analyze_shard_key/persist_sampled_diffs.js
+ - jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js
+ - jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js
+ - jstests/sharding/analyze_shard_key/refresh_sample_rates.js
+ - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+ - jstests/sharding/read_after_optime.js
+ - jstests/sharding/server_status.js
+ - jstests/sharding/drop_configdb.js
+ - jstests/sharding/shard_identity_config_update.js
+ - jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js
+ - jstests/sharding/key_rotation.js
+ - jstests/sharding/keys_rotation_interval_sec.js
+ - jstests/sharding/migration_coordinator_basic.js # sets a failpoint on the config primary
+ - jstests/sharding/migration_coordinator_abort_failover.js # sets a failpoint on the config primary
+ - jstests/sharding/migration_coordinator_commit_failover.js # sets a failpoint on the config primary
+ - jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js
+ - jstests/sharding/move_chunk_insert_with_write_retryability.js
+ - jstests/sharding/move_chunk_remove_with_write_retryability.js
+ - jstests/sharding/move_chunk_update_with_write_retryability.js
+ - jstests/sharding/refine_collection_shard_key_atomic.js # sets a failpoint on the config primary
+ - jstests/sharding/restart_transactions.js
+ - jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js
+ - jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js
+ - jstests/sharding/txn_two_phase_commit_failover.js
+ - jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js
+ # Runs setShardVersion/getShardVersion against the config server and we don't support retries
+ # for this command
+ - jstests/sharding/major_version_check.js
+ # Runs replSetGetStatus -- via awaitLastOpCommitted -- directly against the config server:
+ # retries aren't supported.
+ - jstests/sharding/coll_epoch_test1.js
+ - jstests/sharding/move_stale_mongos.js
+ - jstests/sharding/shard4.js
+ - jstests/sharding/shard5.js
+ - jstests/sharding/split_stale_mongos.js
+ - jstests/sharding/stale_mongos_updates_and_removes.js
+ - jstests/sharding/zero_shard_version.js
+ # Already stops or blackholes the primary of the CSRS config shard
+ - jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+ - jstests/sharding/all_config_servers_blackholed_from_mongos.js
+ - jstests/sharding/batch_write_command_sharded.js
+ - jstests/sharding/config_rs_no_primary.js
+ - jstests/sharding/startup_with_all_configs_down.js
+ - jstests/sharding/lagged_config_secondary.js
+ - jstests/sharding/autodiscover_config_rs_from_secondary.js
+ - jstests/sharding/rs_stepdown_and_pooling.js
+ - jstests/sharding/mongos_no_replica_set_refresh.js
+ - jstests/sharding/primary_config_server_blackholed_from_mongos.js
+ - jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
+ - jstests/sharding/health_monitor/config_server_health_observer_crash.js
+ # Nothing is affected by config server step down
+ - jstests/sharding/basic_sharding_params.js
+ # ShardingTest is never used, so continuous step down thread never starts
+ - jstests/sharding/config_rs_change.js
+ - jstests/sharding/empty_cluster_init.js
+ # Temporarily denylisted until more robust
+ # Expects same secondaries for entire test
+ - jstests/sharding/commands_that_write_accept_wc_configRS.js
+ - jstests/sharding/commands_that_write_accept_wc_shards.js
+ - jstests/sharding/move_chunk_wc.js
+ # Expects that connections to all shards/config servers will never close
+ - jstests/sharding/shard6.js
+ # Stepping down the primary can make the balancer rerun a migration that was designed to fail
+ # earlier, but can potentially pass or have different side effects on the second try
+ - jstests/sharding/migration_ignore_interrupts_1.js
+ - jstests/sharding/migration_sets_fromMigrate_flag.js
+ - jstests/sharding/migration_waits_for_majority_commit.js
+ # listCollections is not retryable
+ - jstests/sharding/sessions_collection_auto_healing.js
+ # shardCollection is not retryable
+ - jstests/sharding/shard_collection_config_db.js
+ # creates collection, does movePrimary, then shards the collection and moves a chunk to the old
+ # primary (SERVER-31909)
+ - jstests/sharding/mongos_validate_writes.js
+ # Test expects a specific chunk distribution after shardCollection and it can be broken when
+ # a step down occurs.
+ - jstests/sharding/regex_targeting.js
+ # Calls movePrimary after data has been inserted into an unsharded collection, so will fail if
+ # a stepdown causes the command to be sent again.
+ - jstests/sharding/move_primary_clone.js
+ - jstests/sharding/mongos_validate_writes.js
+ - jstests/sharding/movePrimary1.js
+ # Asserts that the _flushDatabaseCacheUpdates at the end of _configsvrCreateDatabase is sent, but
+ # it may not be sent if the config server primary steps down just before sending it.
+ - jstests/sharding/database_versioning_all_commands.js
+ # Calls removeShard/removeshard which is not idempotent and these tests expect it to be run an exact number of times
+ - jstests/sharding/addshard5.js
+ - jstests/sharding/auth_add_shard.js
+ - jstests/sharding/remove3.js
+ - jstests/sharding/authCommands.js
+ # - jstests/sharding/addshard2.js
+ # - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js
+ # - jstests/sharding/convert_to_and_from_sharded.js
+ # - jstests/sharding/names.js
+ # - jstests/sharding/remove1.js
+ # - jstests/sharding/remove2.js
+
+ # Moves a chunk before continuing a transaction, which can lead to snapshot errors if the
+ # CSRS failovers are sufficiently slow.
+ - jstests/sharding/transactions_reject_writes_for_moved_chunks.js
+ - jstests/sharding/snapshot_reads_target_at_point_in_time.js
+ # Tests that rely on shards becoming aware of collection drops regardless of config stepdowns.
+ # (SERVER-34760)
+ - jstests/sharding/merge_requires_unique_index.js
+ - jstests/sharding/query/merge_stale_on_fields.js
+ - jstests/sharding/query/out_fails_to_replace_sharded_collection.js
+ # Sets a failpoint on find commands which can lead to a hang when a config steps down.
+ - jstests/sharding/sharding_statistics_server_status.js
+ # setShardVersion is not robust during config server stepdown.
+ - jstests/sharding/mongos_no_detect_sharding.js
+ # Runs commands directly on the config server primary and is not robust to the primary changing.
+ - jstests/sharding/read_write_concern_defaults_commands_api.js
+ - jstests/sharding/read_write_concern_defaults_propagation.js
+ - jstests/sharding/live_shard_startup_recovery_config_server.js
+ - jstests/sharding/live_shard_logical_initial_sync_config_server.js
+
+ # On stepdown there is no guarantee that changelog entries have been inserted [SERVER-45655]
+ - jstests/sharding/refine_collection_shard_key_changelog.js
+ # This is expected to fail if the config server steps down during moveChunk.
+ - jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js
+ - jstests/sharding/move_chunk_critical_section_non_internal_client_abort.js
+ # Runs commands on mongos which target the primary configsvr, and then checks the outcome using
+ # profiling/logging (on the configsvr primary), so cannot tolerate the configsvr primary changing.
+ - jstests/sharding/read_write_concern_defaults_application.js
+
+ # SERVER-48537 addShard is not idempotent for retries
+ - jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js
+ - jstests/sharding/move_primary_with_writes.js
+
+ # Expects same CSRS primary and shard primary throughout the test
+ - jstests/sharding/api_params_nontransaction_sharded.js
+ - jstests/sharding/api_params_nontransaction_unsharded.js
+ - jstests/sharding/api_params_transaction_sharded.js
+ - jstests/sharding/api_params_transaction_unsharded.js
+
+ # TODO SERVER-51495: Re-enable these tests after reshardCollection is resilient to config server
+ # primary failovers.
+ - jstests/sharding/*reshard*.js
+
+ # SERVER-51805 splitChunk op is not idempotent
+ - jstests/sharding/mongos_get_shard_version.js
+
+ # Expects that reshardCollection executes without a config server stepdown
+ - jstests/sharding/shard_encrypted_collection.js
+
+ # Runs commands against mongos which target the config server primary and may fail with
+ # FailedToSatisfyReadPreference when electing a new primary of the config server replica
+ # set takes a while.
+ - jstests/sharding/move_chunk_respects_maxtimems.js
+
+ # This test verifies that the number of queries that each mongos or shardsvr mongod samples is
+ # proportional to the number of queries it routes. This is enforced via the sample rate assignment
+ # by the configsvr primary based on the traffic distribution information it has in memory. So the
+ # test doesn't pass reliably when there is continuous stepdown on the config server.
+ - jstests/sharding/analyze_shard_key/sample_rates_sharded.js
+
+ exclude_with_any_tags:
+ - does_not_support_stepdowns
+
+executor:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ # TODO: SERVER-45994 remove
+ skipCheckingCatalogCacheConsistencyWithShardingCatalog: true
+ skipCheckOrphans: true
+ eval: "load('jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js');"
+ nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
index a16c47a3e1f01..4608fd9684827 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml
@@ -56,6 +56,7 @@ executor:
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: ValidateCollections
+ - class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml
deleted file mode 100644
index 9550b5331d934..0000000000000
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-test_kind: js_test
-
-selector:
- roots:
- - jstests/core/**/*.js
- - jstests/fle2/**/*.js
- - src/mongo/db/modules/*/jstests/fle2/**/*.js
- exclude_files:
- # These tests are run in sharded_jscore_txns.
- - jstests/core/txns/**/*.js
- # The following tests fail because a certain command or functionality is not supported on
- # mongos. This command or functionality is placed in a comment next to the failing test.
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
- - jstests/core/**/check_shard_index.js # checkShardingIndex.
- - jstests/core/**/collection_truncate.js # emptycapped.
- - jstests/core/**/compact_keeps_indexes.js # compact.
- - jstests/core/**/currentop.js # uses fsync.
- - jstests/core/**/dbhash.js # dbhash.
- - jstests/core/**/dbhash2.js # dbhash.
- - jstests/core/**/fsync.js # uses fsync.
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
- - jstests/core/**/geo_update_btree2.js # notablescan.
- - jstests/core/**/index9.js # "local" database.
- - jstests/core/**/queryoptimizera.js # "local" database.
- - jstests/core/**/stages*.js # stageDebug.
- - jstests/core/**/startup_log.js # "local" database.
- - jstests/core/**/top.js # top.
- # The following tests fail because mongos behaves differently from mongod when testing certain
- # functionality. The differences are in a comment next to the failing test.
- - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos.
- - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
- - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
- - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
- - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
- - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain()
- # The following tests fail because of divergent dropCollection behavior between standalones and
- # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters
- # we expect a second drop to return status OK.
- - jstests/core/**/explain_upsert.js
-
- exclude_with_any_tags:
- - assumes_standalone_mongod
- - assumes_against_mongod_not_mongos
- # system.profile collection doesn't exist on mongos.
- - requires_profiling
- - catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
-
-executor:
- archive:
- hooks:
- - CheckReplDBHash
- - ValidateCollections
- config: {}
- hooks:
- - class: CheckReplDBHash
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: ShardedClusterFixture
- catalog_shard: "any"
- mongos_options:
- set_parameters:
- enableTestCommands: 1
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- num_rs_nodes_per_shard: 1
- enable_sharding:
- - test
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml
new file mode 100644
index 0000000000000..99cfc9c7e1a71
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml
@@ -0,0 +1,72 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/core/**/*.js
+ - jstests/fle2/**/*.js
+ - src/mongo/db/modules/*/jstests/fle2/**/*.js
+ exclude_files:
+ # These tests are run in sharded_jscore_txns.
+ - jstests/core/txns/**/*.js
+ # The following tests fail because a certain command or functionality is not supported on
+ # mongos. This command or functionality is placed in a comment next to the failing test.
+ - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine.
+ - jstests/core/**/check_shard_index.js # checkShardingIndex.
+ - jstests/core/**/collection_truncate.js # emptycapped.
+ - jstests/core/**/compact_keeps_indexes.js # compact.
+ - jstests/core/**/currentop.js # uses fsync.
+ - jstests/core/**/dbhash.js # dbhash.
+ - jstests/core/**/dbhash2.js # dbhash.
+ - jstests/core/**/fsync.js # uses fsync.
+ - jstests/core/**/geo_s2cursorlimitskip.js # profiling.
+ - jstests/core/**/geo_update_btree2.js # notablescan.
+ - jstests/core/**/index9.js # "local" database.
+ - jstests/core/**/queryoptimizera.js # "local" database.
+ - jstests/core/**/stages*.js # stageDebug.
+ - jstests/core/**/startup_log.js # "local" database.
+ - jstests/core/**/top.js # top.
+ # The following tests fail because mongos behaves differently from mongod when testing certain
+ # functionality. The differences are in a comment next to the failing test.
+ - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos.
+ - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain().
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain().
+ - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate().
+ - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880.
+ - jstests/core/**/killop_drop_collection.js # Uses fsyncLock.
+ - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain()
+ # The following tests fail because of divergent dropCollection behavior between standalones and
+ # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters
+ # we expect a second drop to return status OK.
+ - jstests/core/**/explain_upsert.js
+
+ exclude_with_any_tags:
+ - assumes_standalone_mongod
+ - assumes_against_mongod_not_mongos
+ # system.profile collection doesn't exist on mongos.
+ - requires_profiling
+ - config_shard_incompatible
+
+executor:
+ archive:
+ hooks:
+ - CheckReplDBHash
+ - ValidateCollections
+ config: {}
+ hooks:
+ - class: CheckReplDBHash
+ - class: ValidateCollections
+ - class: CheckOrphansDeleted
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: ShardedClusterFixture
+ config_shard: "any"
+ mongos_options:
+ set_parameters:
+ enableTestCommands: 1
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ num_rs_nodes_per_shard: 1
+ enable_sharding:
+ - test
diff --git a/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml b/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml
index a9f0a03cbd941..562152fcce1f5 100644
--- a/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml
+++ b/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml
@@ -40,4 +40,6 @@ executor:
syncdelay: 10
set_parameters:
enableTestCommands: 1
+ queryAnalysisSamplerConfigurationRefreshSecs: 1
+ queryAnalysisWriterIntervalSecs: 1
num_nodes: 3
diff --git a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml
index 5c7fb0c2a604b..e2402782812f3 100644
--- a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml
@@ -33,7 +33,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
diff --git a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml
index 11d26f61e8dd9..05626cf8ce085 100644
--- a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml
@@ -139,9 +139,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js");
diff --git a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
deleted file mode 100644
index 1aa2a490a5f8e..0000000000000
--- a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-test_kind: js_test
-description: |
- This suite enables the collection of telemetry metrics on a mongod server, then runs the tests in
- core and aggregation as normal. This should cause each query or aggregation to compute a query
- shape and telemetry key, and record in-memory some metrics like execution time and number of
- scanned documents. It doesn't assert anything about the collected telemetry, it is just meant to
- make sure nothing is going seriously awry (e.g. crashing).
-
-selector:
- roots:
- - jstests/aggregation/**/*.js
- - jstests/core/**/*.js
- exclude_files:
- # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests.
- - jstests/core/txns/**/*.js
-
-executor:
- archive:
- hooks:
- - ValidateCollections
- hooks:
- - class: ValidateCollections
- - class: CleanEveryN
- n: 20
- fixture:
- class: MongoDFixture
- mongod_options:
- set_parameters:
- enableTestCommands: 1
- internalQueryConfigureTelemetrySamplingRate: -1
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml
index c67d77742a988..43887e1b8332d 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml
@@ -78,7 +78,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_causal_consistency.js');
load('jstests/libs/override_methods/enable_sessions.js');
@@ -138,9 +138,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
index ad61ab8662a4c..2b8bccbc121be 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
@@ -51,7 +51,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
jsTest.authenticate(db.getMongo());
global_vars:
@@ -103,9 +103,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml
index 994837fffaccd..196474da76eba 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml
@@ -117,8 +117,6 @@ selector:
- requires_dbstats
# "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..."
- requires_collstats
- # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..."
- - requires_datasize
# Due to background tenant migrations, operations in the main test shell are not guaranteed to
# be causally consistent with operations in a parallel shell. The reason is that
# TenantMigrationCommitted error is only thrown when the client does a write or a atClusterTime/
@@ -155,9 +153,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -195,7 +193,7 @@ executor:
- class: ContinuousTenantMigration
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
@@ -242,9 +240,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
index e3c9ad3cb9825..a7ae31167c453 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml
@@ -262,7 +262,7 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load('jstests/libs/override_methods/enable_sessions.js');
load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js');
@@ -323,9 +323,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml
index 02b9b194ce3fb..9e6193b1bf7dd 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml
@@ -147,9 +147,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -184,7 +184,7 @@ executor:
- class: ContinuousTenantMigration
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
@@ -229,9 +229,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml
index 8848be9298c6c..3e6208604364a 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml
@@ -147,9 +147,9 @@ executor:
config:
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
- db = connect(TestData.connectionString);
+ globalThis.db = connect(TestData.connectionString);
load('jstests/libs/override_methods/inject_tenant_prefix.js');
load("jstests/libs/override_methods/enable_sessions.js");
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
@@ -185,7 +185,7 @@ executor:
- class: ContinuousTenantMigration
shell_options:
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
load("jstests/libs/override_methods/set_read_and_write_concerns.js");
global_vars:
@@ -230,9 +230,6 @@ executor:
# so. Therefore, the garbage collection delay doesn't need to be large.
tenantMigrationGarbageCollectionDelayMS: 1
ttlMonitorSleepSecs: 1
- # Tenant migrations is not currently compatible with implicitly replicated retryable
- # findAndModify images.
- storeFindAndModifyImagesInSideCollection: false
tlsMode: allowTLS
tlsCAFile: jstests/libs/ca.pem
tlsAllowInvalidHostnames: ''
diff --git a/buildscripts/resmokeconfig/suites/vector_search.yml b/buildscripts/resmokeconfig/suites/vector_search.yml
new file mode 100644
index 0000000000000..35174e5a00396
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/vector_search.yml
@@ -0,0 +1,10 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - src/mongo/db/modules/*/jstests/vector_search/*.js
+
+executor:
+ config:
+ shell_options:
+ nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
index 1ff025d34f796..422a1911f1c96 100644
--- a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml
@@ -46,7 +46,7 @@ executor:
TestData:
defaultReadConcernLevel: local
eval: >-
- testingReplication = true;
+ globalThis.testingReplication = true;
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
load('jstests/libs/override_methods/set_read_preference_secondary.js');
hooks:
diff --git a/buildscripts/resmokelib/cli.py b/buildscripts/resmokelib/cli.py
index 1a72d6e638904..0302272726a8b 100644
--- a/buildscripts/resmokelib/cli.py
+++ b/buildscripts/resmokelib/cli.py
@@ -4,8 +4,6 @@
import time
import os
import psutil
-from mongo_tooling_metrics.client import get_mongo_metrics_client
-from mongo_tooling_metrics.errors import ExternalHostException
from mongo_tooling_metrics.lib.top_level_metrics import ResmokeToolingMetrics
from buildscripts.resmokelib import parser
@@ -25,20 +23,10 @@ def main(argv):
usage="Resmoke is MongoDB's correctness testing orchestrator.\n"
"For more information, see the help message for each subcommand.\n"
"For example: resmoke.py run -h\n"
- "Note: bisect and setup-multiversion subcommands have been moved to db-contrib-tool (https://github.com/10gen/db-contrib-tool#readme).\n"
+ "Note: bisect, setup-multiversion and symbolize subcommands have been moved to db-contrib-tool (https://github.com/10gen/db-contrib-tool#readme).\n"
+ )
+ ResmokeToolingMetrics.register_metrics(
+ utc_starttime=datetime.utcfromtimestamp(__start_time),
+ parser=parser.get_parser(),
)
- try:
- metrics_client = get_mongo_metrics_client()
- metrics_client.register_metrics(
- ResmokeToolingMetrics,
- utc_starttime=datetime.utcfromtimestamp(__start_time),
- parser=parser.get_parser(),
- )
- except ExternalHostException as _:
- pass
- except Exception as _: # pylint: disable=broad-except
- print(
- "This MongoDB Virtual Workstation could not connect to the internal cluster\nThis is a non-issue, but if this message persists feel free to reach out in #server-dev-platform"
- )
-
subcommand.execute()
diff --git a/buildscripts/resmokelib/config.py b/buildscripts/resmokelib/config.py
index 7643c05da7ac3..368a1a5df36bd 100644
--- a/buildscripts/resmokelib/config.py
+++ b/buildscripts/resmokelib/config.py
@@ -57,7 +57,7 @@
"base_port": 20000,
"backup_on_restart_dir": None,
"buildlogger_url": "https://logkeeper2.build.10gen.cc",
- "catalog_shard": None,
+ "config_shard": None,
"continue_on_failure": False,
"dbpath_prefix": None,
"dbtest_executable": None,
@@ -67,6 +67,7 @@
"flow_control_tickets": None,
"force_excluded_tests": False,
"fuzz_mongod_configs": None,
+ "fuzz_mongos_configs": None,
"config_fuzz_seed": None,
"genny_executable": None,
"include_with_any_tags": None,
@@ -83,6 +84,7 @@
"mrlog": None,
"no_journal": False,
"num_clients_per_fixture": 1,
+ "origin_suite": None,
"perf_report_file": None,
"cedar_report_file": None,
"repeat_suites": 1,
@@ -300,8 +302,8 @@ def all_options(cls):
# actually running them).
DRY_RUN = None
-# If set, specifies which node is the catalog shard. Can also be set to 'any'.
-CATALOG_SHARD = None
+# If set, specifies which node is the config shard. Can also be set to 'any'.
+CONFIG_SHARD = None
# URL to connect to the Evergreen service.
EVERGREEN_URL = None
@@ -357,7 +359,13 @@ def all_options(cls):
# If true, then a test failure or error will cause resmoke.py to exit and not run any more tests.
FAIL_FAST = None
+# Defines how to fuzz mongod parameters
FUZZ_MONGOD_CONFIGS = None
+
+# Defines how to fuzz mongos parameters
+FUZZ_MONGOS_CONFIGS = None
+
+# This seeds the random number generator used to fuzz mongod and mongos parameters
CONFIG_FUZZ_SEED = None
# Executable file for genny, passed in as a command line arg.
@@ -420,6 +428,9 @@ def all_options(cls):
# If set, then each fixture runs tests with the specified number of clients.
NUM_CLIENTS_PER_FIXTURE = None
+# Indicates the name of the test suite prior to the suite being split up by suite generation
+ORIGIN_SUITE = None
+
# Report file for the Evergreen performance plugin.
PERF_REPORT_FILE = None
@@ -570,12 +581,13 @@ def all_options(cls):
DEFAULT_UNIT_TEST_LIST = "build/unittests.txt"
DEFAULT_INTEGRATION_TEST_LIST = "build/integration_tests.txt"
DEFAULT_LIBFUZZER_TEST_LIST = "build/libfuzzer_tests.txt"
+DEFAULT_PRETTY_PRINTER_TEST_LIST = "build/pretty_printer_tests.txt"
# External files or executables, used as suite selectors, that are created during the build and
# therefore might not be available when creating a test membership map.
EXTERNAL_SUITE_SELECTORS = (DEFAULT_BENCHMARK_TEST_LIST, DEFAULT_UNIT_TEST_LIST,
DEFAULT_INTEGRATION_TEST_LIST, DEFAULT_DBTEST_EXECUTABLE,
- DEFAULT_LIBFUZZER_TEST_LIST)
+ DEFAULT_LIBFUZZER_TEST_LIST, DEFAULT_PRETTY_PRINTER_TEST_LIST)
# Where to look for logging and suite configuration files
CONFIG_DIR = None
diff --git a/buildscripts/resmokelib/configure_resmoke.py b/buildscripts/resmokelib/configure_resmoke.py
index ce9019c90b8ff..f3d3042476eb0 100644
--- a/buildscripts/resmokelib/configure_resmoke.py
+++ b/buildscripts/resmokelib/configure_resmoke.py
@@ -14,13 +14,14 @@
import shlex
import pymongo.uri_parser
+import yaml
from buildscripts.idl import gen_all_feature_flag_list
from buildscripts.idl.lib import ALL_FEATURE_FLAG_FILE
from buildscripts.resmokelib import config as _config
from buildscripts.resmokelib import utils
-from buildscripts.resmokelib import mongod_fuzzer_configs
+from buildscripts.resmokelib import mongo_fuzzer_configs
from buildscripts.resmokelib.suitesconfig import SuiteFinder
@@ -238,6 +239,11 @@ def setup_feature_flags():
_config.EXCLUDE_WITH_ANY_TAGS.extend(
utils.default_if_none(_tags_from_list(config.pop("exclude_with_any_tags")), []))
+ force_disabled_flags = yaml.safe_load(
+ open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml"))
+
+ _config.EXCLUDE_WITH_ANY_TAGS.extend(force_disabled_flags)
+
if _config.RUN_NO_FEATURE_FLAG_TESTS:
# Don't run any feature flag tests.
_config.EXCLUDE_WITH_ANY_TAGS.extend(all_feature_flags)
@@ -301,6 +307,7 @@ def _merge_set_params(param_list):
_config.MONGOD_SET_PARAMETERS = _merge_set_params(mongod_set_parameters)
_config.FUZZ_MONGOD_CONFIGS = config.pop("fuzz_mongod_configs")
+ _config.FUZZ_MONGOS_CONFIGS = config.pop("fuzz_mongos_configs")
_config.CONFIG_FUZZ_SEED = config.pop("config_fuzz_seed")
if _config.FUZZ_MONGOD_CONFIGS:
@@ -309,7 +316,7 @@ def _merge_set_params(param_list):
else:
_config.CONFIG_FUZZ_SEED = int(_config.CONFIG_FUZZ_SEED)
_config.MONGOD_SET_PARAMETERS, _config.WT_ENGINE_CONFIG, _config.WT_COLL_CONFIG, \
- _config.WT_INDEX_CONFIG = mongod_fuzzer_configs.fuzz_set_parameters(
+ _config.WT_INDEX_CONFIG = mongo_fuzzer_configs.fuzz_mongod_set_parameters(
_config.FUZZ_MONGOD_CONFIGS, _config.CONFIG_FUZZ_SEED, _config.MONGOD_SET_PARAMETERS)
_config.EXCLUDE_WITH_ANY_TAGS.extend(["uses_compact"])
_config.EXCLUDE_WITH_ANY_TAGS.extend(["requires_emptycapped"])
@@ -318,6 +325,15 @@ def _merge_set_params(param_list):
mongos_set_parameters = config.pop("mongos_set_parameters")
_config.MONGOS_SET_PARAMETERS = _merge_set_params(mongos_set_parameters)
+ if _config.FUZZ_MONGOS_CONFIGS:
+ if not _config.CONFIG_FUZZ_SEED:
+ _config.CONFIG_FUZZ_SEED = random.randrange(sys.maxsize)
+ else:
+ _config.CONFIG_FUZZ_SEED = int(_config.CONFIG_FUZZ_SEED)
+
+ _config.MONGOS_SET_PARAMETERS = mongo_fuzzer_configs.fuzz_mongos_set_parameters(
+ _config.FUZZ_MONGOS_CONFIGS, _config.CONFIG_FUZZ_SEED, _config.MONGOS_SET_PARAMETERS)
+
_config.MONGOCRYPTD_SET_PARAMETERS = _merge_set_params(config.pop("mongocryptd_set_parameters"))
_config.MRLOG = config.pop("mrlog")
@@ -325,8 +341,9 @@ def _merge_set_params(param_list):
_config.NUM_CLIENTS_PER_FIXTURE = config.pop("num_clients_per_fixture")
_config.NUM_REPLSET_NODES = config.pop("num_replset_nodes")
_config.NUM_SHARDS = config.pop("num_shards")
- _config.CATALOG_SHARD = utils.pick_catalog_shard_node(
- config.pop("catalog_shard"), _config.NUM_SHARDS)
+ _config.CONFIG_SHARD = utils.pick_catalog_shard_node(
+ config.pop("config_shard"), _config.NUM_SHARDS)
+ _config.ORIGIN_SUITE = config.pop("origin_suite")
_config.PERF_REPORT_FILE = config.pop("perf_report_file")
_config.CEDAR_REPORT_FILE = config.pop("cedar_report_file")
_config.RANDOM_SEED = config.pop("seed")
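Editor's note: the mongos fuzzing path added above mirrors the existing mongod path — both draw values from a random number generator seeded with CONFIG_FUZZ_SEED, so a failing fuzzed configuration can be reproduced by passing the same seed. Below is a minimal sketch of that idea; the parameter names and ranges are hypothetical, the real selection logic lives in buildscripts/resmokelib/mongo_fuzzer_configs.py (which also takes a fuzz-mode argument), and this is not that implementation.

import random


def fuzz_mongos_set_parameters_sketch(seed, user_params):
    """Illustrative only: derive a reproducible set of mongos parameters from a seed."""
    rng = random.Random(seed)  # same seed -> same fuzzed values
    fuzzed = {
        # Hypothetical parameters and ranges, for illustration only.
        "ShardingTaskExecutorPoolMinSize": rng.randint(0, 2),
        "taskExecutorPoolSize": rng.randint(1, 4),
    }
    # Explicitly supplied mongos set parameters take precedence over fuzzed ones.
    fuzzed.update(user_params or {})
    return fuzzed


# The same seed always yields the same parameter set.
assert (fuzz_mongos_set_parameters_sketch(42, {}) ==
        fuzz_mongos_set_parameters_sketch(42, {}))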
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index a82383f1958c9..f0de0044f4c4e 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -252,8 +252,8 @@ def basename(filepath):
test_data["undoRecorderPath"] = config.UNDO_RECORDER_PATH
- if "catalogShard" not in test_data and config.CATALOG_SHARD is not None:
- test_data["catalogShard"] = config.CATALOG_SHARD
+ if "configShard" not in test_data and config.CONFIG_SHARD is not None:
+ test_data["configShard"] = True
# There's a periodic background thread that checks for and aborts expired transactions.
# "transactionLifetimeLimitSeconds" specifies for how long a transaction can run before expiring
@@ -279,7 +279,7 @@ def basename(filepath):
eval_sb.append(str(kwargs.pop("eval")))
# Load a callback to check that the cluster-wide metadata is consistent.
- eval_sb.append("load('jstests/libs/override_methods/check_metadata_consistency.js');")
+ eval_sb.append("await import('jstests/libs/override_methods/check_metadata_consistency.js');")
# Load this file to allow a callback to validate collections before shutting down mongod.
eval_sb.append("load('jstests/libs/override_methods/validate_collections_on_shutdown.js');")
@@ -297,7 +297,8 @@ def basename(filepath):
# Load a callback to check that the info stored in config.collections and config.chunks is
# semantically correct before shutting down a ShardingTest.
- eval_sb.append("load('jstests/libs/override_methods/check_routing_table_consistency.js');")
+ eval_sb.append(
+ "await import('jstests/libs/override_methods/check_routing_table_consistency.js');")
# Load a callback to check that all shards have correct filtering information before shutting
# down a ShardingTest.
@@ -454,6 +455,11 @@ def _set_keyfile_permissions(opts):
We can't permanently set the keyfile permissions because git is not
aware of them.
"""
+ for keysuffix in ["1", "2", "ForRollover"]:
+ keyfile = "jstests/libs/key%s" % keysuffix
+ if os.path.exists(keyfile):
+ os.chmod(keyfile, stat.S_IRUSR | stat.S_IWUSR)
+
if "keyFile" in opts:
os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR)
if "encryptionKeyFile" in opts:
diff --git a/buildscripts/resmokelib/errors.py b/buildscripts/resmokelib/errors.py
index f323782ca9bc3..d95cb4ef7911f 100644
--- a/buildscripts/resmokelib/errors.py
+++ b/buildscripts/resmokelib/errors.py
@@ -86,3 +86,9 @@ class InvalidMatrixSuiteError(ResmokeError):
"""Exception raised when validating a matrix suite mapping file."""
pass
+
+
+class TagFileDoesNotExistError(ResmokeError):
+ """Exception raised when a tag file is passed into resmoke that does not exist."""
+
+ pass
diff --git a/buildscripts/resmokelib/flags.py b/buildscripts/resmokelib/flags.py
new file mode 100644
index 0000000000000..6aff696166684
--- /dev/null
+++ b/buildscripts/resmokelib/flags.py
@@ -0,0 +1,5 @@
+"""Global flags used by resmoke."""
+
+import threading
+
+HANG_ANALYZER_CALLED = threading.Event()
diff --git a/buildscripts/resmokelib/hang_analyzer/extractor.py b/buildscripts/resmokelib/hang_analyzer/extractor.py
index 2e8870064aa03..23fc02f431d49 100644
--- a/buildscripts/resmokelib/hang_analyzer/extractor.py
+++ b/buildscripts/resmokelib/hang_analyzer/extractor.py
@@ -7,19 +7,22 @@
from buildscripts.resmokelib.setup_multiversion.download import DownloadError
from buildscripts.resmokelib.run import compare_start_time
from buildscripts.resmokelib.utils.filesystem import build_hygienic_bin_path
+from buildscripts.resmokelib.symbolizer import Symbolizer
_DEBUG_FILE_BASE_NAMES = ['mongo', 'mongod', 'mongos']
-def download_debug_symbols(root_logger, symbolizer):
+def download_debug_symbols(root_logger, symbolizer: Symbolizer, retry_secs: int = 10,
+ download_timeout_secs: int = 10 * 60):
"""
Extract debug symbols. Idempotent.
:param root_logger: logger to use
:param symbolizer: pre-configured instance of symbolizer for downloading symbols.
+ :param retry_secs: seconds before retrying to download symbols
+ :param download_timeout_secs: timeout in seconds before failing to download
:return: None
"""
- retry_secs = 10
# Check if the files are already there. They would be on *SAN builds.
sym_files = _get_symbol_files()
@@ -32,17 +35,18 @@ def download_debug_symbols(root_logger, symbolizer):
while True:
try:
symbolizer.execute()
+ root_logger.info("Debug symbols successfully downloaded")
break
except (tarfile.ReadError, DownloadError):
- root_logger.info("Debug symbols unavailable after %s secs, retrying in %s secs",
- compare_start_time(time.time()), retry_secs)
+ root_logger.warning(
+ "Debug symbols unavailable after %s secs, retrying in %s secs, waiting for a total of %s secs",
+ compare_start_time(time.time()), retry_secs, download_timeout_secs)
time.sleep(retry_secs)
- ten_min = 10 * 60
- if compare_start_time(time.time()) > ten_min:
- root_logger.info(
+ if compare_start_time(time.time()) > download_timeout_secs:
+ root_logger.warning(
'Debug-symbols archive-file does not exist after %s secs; '
- 'Hang-Analyzer may not complete successfully.', ten_min)
+ 'Hang-Analyzer may not complete successfully.', download_timeout_secs)
break
diff --git a/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py b/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py
index 190aa9824da78..28dc4248467c0 100755
--- a/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py
+++ b/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py
@@ -68,6 +68,12 @@ def configure_task_id():
self._configure_processes()
self._setup_logging(logger)
+ def kill_rogue_processes(self):
+ """Kill any processes that are currently being analyzed."""
+ processes = process_list.get_processes(self.process_ids, self.interesting_processes,
+ self.options.process_match, self.root_logger)
+ process.teardown_processes(self.root_logger, processes, dump_pids={})
+
def execute(self):
"""
Execute hang analysis.
@@ -193,13 +199,12 @@ def _configure_processes(self):
def _setup_logging(self, logger):
if logger is None:
self.root_logger = logging.Logger("hang_analyzer", level=logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(fmt="%(message)s"))
+ self.root_logger.addHandler(handler)
else:
self.root_logger = logger
- handler = logging.StreamHandler(sys.stdout)
- handler.setFormatter(logging.Formatter(fmt="%(message)s"))
- self.root_logger.addHandler(handler)
-
def _log_system_info(self):
self.root_logger.info("Python Version: %s", sys.version)
self.root_logger.info("OS: %s", platform.platform())
diff --git a/buildscripts/resmokelib/mongo_fuzzer_configs.py b/buildscripts/resmokelib/mongo_fuzzer_configs.py
new file mode 100644
index 0000000000000..2e91f9e76e0e7
--- /dev/null
+++ b/buildscripts/resmokelib/mongo_fuzzer_configs.py
@@ -0,0 +1,191 @@
+"""Generator functions for all parameters that we fuzz when invoked with --fuzzMongodConfigs."""
+
+import random
+from buildscripts.resmokelib import utils
+
+
+def generate_eviction_configs(rng, mode):
+ """Generate random configurations for wiredTigerEngineConfigString parameter."""
+ eviction_checkpoint_target = rng.randint(1, 99)
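+ # Keep eviction_trigger strictly greater than eviction_target by sampling it from the remaining range.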
+ eviction_target = rng.randint(50, 95)
+ eviction_trigger = rng.randint(eviction_target + 1, 99)
+
+ # Fuzz eviction_dirty_target and trigger both as relative and absolute values
+ target_bytes_min = 50 * 1024 * 1024 # 50MB # 5% of 1GB default cache size on Evergreen
+ target_bytes_max = 256 * 1024 * 1024 # 256MB # 1GB default cache size on Evergreen
+ eviction_dirty_target = rng.choice(
+ [rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max)])
+ trigger_max = 75 if eviction_dirty_target <= 50 else target_bytes_max
+ eviction_dirty_trigger = rng.randint(eviction_dirty_target + 1, trigger_max)
+
+ assert eviction_dirty_trigger > eviction_dirty_target
+ assert eviction_dirty_trigger <= trigger_max
+
+ # Fuzz eviction_updates_target and eviction_updates_trigger. These are by default half the
+ # values of the corresponding eviction dirty target and trigger. They need to stay less than the
+ # dirty equivalents. The default updates target is 2.5% of the cache, so let's start fuzzing
+ # from 2%.
+ updates_target_min = 2 if eviction_dirty_target <= 100 else 20 * 1024 * 1024 # 2% of 1GB cache
+ eviction_updates_target = rng.randint(updates_target_min, eviction_dirty_target - 1)
+ eviction_updates_trigger = rng.randint(eviction_updates_target + 1, eviction_dirty_trigger - 1)
+
+ # Fuzz File manager settings
+ close_idle_time_secs = rng.randint(1, 100)
+ close_handle_minimum = rng.randint(0, 1000)
+ close_scan_interval = rng.randint(1, 100)
+
+ # The debug_mode for WiredTiger offers some settings to change internal behavior that could help
+ # find bugs. Settings to fuzz:
+ # eviction - Turns aggressive eviction on/off
+ # realloc_exact - Finds more memory bugs by allocating the memory for the exact size asked
+ # rollback_error - Forces WiredTiger to return a rollback error every Nth call
+ # slow_checkpoint - Adds internal delays in processing internal leaf pages during a checkpoint
+ dbg_eviction = rng.choice(['true', 'false'])
+ dbg_realloc_exact = rng.choice(['true', 'false'])
+ # Rollback every Nth transaction. The values have been tuned after looking at how many
+ # WiredTiger transactions happen per second for the config-fuzzed jstests.
+ # The setting is triggering bugs; it is disabled until they get resolved.
+ # dbg_rollback_error = rng.choice([0, rng.randint(250, 1500)])
+ dbg_rollback_error = 0
+ dbg_slow_checkpoint = 'false' if mode != 'stress' else rng.choice(['true', 'false'])
+
+ return "debug_mode=(eviction={0},realloc_exact={1},rollback_error={2}, slow_checkpoint={3}),"\
+ "eviction_checkpoint_target={4},eviction_dirty_target={5},eviction_dirty_trigger={6},"\
+ "eviction_target={7},eviction_trigger={8},eviction_updates_target={9},"\
+ "eviction_updates_trigger={10},file_manager=(close_handle_minimum={11},"\
+ "close_idle_time={12},close_scan_interval={13})".format(dbg_eviction,
+ dbg_realloc_exact,
+ dbg_rollback_error,
+ dbg_slow_checkpoint,
+ eviction_checkpoint_target,
+ eviction_dirty_target,
+ eviction_dirty_trigger,
+ eviction_target,
+ eviction_trigger,
+ eviction_updates_target,
+ eviction_updates_trigger,
+ close_handle_minimum,
+ close_idle_time_secs,
+ close_scan_interval)
+
+
+def generate_table_configs(rng):
+ """Generate random configurations for WiredTiger tables."""
+
+ internal_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024
+ leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024
+ leaf_value_max = rng.choice([1, 32, 128, 256]) * 1024 * 1024
+
+ memory_page_max_lower_bound = leaf_page_max
+ # Assume WT cache size of 1GB as most MDB tests specify this as the cache size.
+ memory_page_max_upper_bound = round(
+ (rng.randint(256, 1024) * 1024 * 1024) / 10) # cache_size / 10
+ memory_page_max = rng.randint(memory_page_max_lower_bound, memory_page_max_upper_bound)
+
+ split_pct = rng.choice([50, 60, 75, 100])
+ prefix_compression = rng.choice(["true", "false"])
+ block_compressor = rng.choice(["none", "snappy", "zlib", "zstd"])
+
+ return "block_compressor={0},internal_page_max={1},leaf_page_max={2},leaf_value_max={3},"\
+ "memory_page_max={4},prefix_compression={5},split_pct={6}".format(block_compressor,
+ internal_page_max,
+ leaf_page_max,
+ leaf_value_max,
+ memory_page_max,
+ prefix_compression,
+ split_pct)
+
+
+def generate_flow_control_parameters(rng):
+ """Generate parameters related to flow control and returns a dictionary."""
+ configs = {}
+ configs["enableFlowControl"] = rng.choice([True, False])
+ if not configs["enableFlowControl"]:
+ return configs
+
+ configs["flowControlTargetLagSeconds"] = rng.randint(1, 1000)
+ configs["flowControlThresholdLagPercentage"] = rng.random()
+ configs["flowControlMaxSamples"] = rng.randint(1, 1000 * 1000)
+ configs["flowControlSamplePeriod"] = rng.randint(1, 1000 * 1000)
+ configs["flowControlMinTicketsPerSecond"] = rng.randint(1, 10 * 1000)
+
+ return configs
+
+
+def generate_mongod_parameters(rng, mode):
+ """Return a dictionary with values for each mongod parameter."""
+ ret = {}
+ ret["analyzeShardKeySplitPointExpirationSecs"] = rng.randint(1, 300)
+ ret["chunkMigrationConcurrency"] = rng.choice([1, 4, 16])
+ ret["disableLogicalSessionCacheRefresh"] = rng.choice([True, False])
+ ret["initialServiceExecutorUseDedicatedThread"] = rng.choice([True, False])
+ # TODO (SERVER-75632): Uncomment this to enable passthrough testing.
+ # ret["lockCodeSegmentsInMemory"] = rng.choice([True, False])
+ if not ret["disableLogicalSessionCacheRefresh"]:
+ ret["logicalSessionRefreshMillis"] = rng.choice([100, 1000, 10000, 100000])
+ ret["maxNumberOfTransactionOperationsInSingleOplogEntry"] = rng.randint(1, 10) * rng.choice(
+ [1, 10, 100])
+ ret["minSnapshotHistoryWindowInSeconds"] = rng.choice([300, rng.randint(30, 600)])
+ ret["mirrorReads"] = {"samplingRate": rng.random()}
+ ret["queryAnalysisWriterMaxMemoryUsageBytes"] = rng.randint(1, 100) * 1024 * 1024
+ ret["syncdelay"] = rng.choice([60, rng.randint(15, 180)])
+ ret["wiredTigerCursorCacheSize"] = rng.randint(-100, 100)
+ ret["wiredTigerSessionCloseIdleTimeSecs"] = rng.randint(0, 300)
+ ret["storageEngineConcurrencyAdjustmentAlgorithm"] = rng.choices(
+ ["throughputProbing", "fixedConcurrentTransactions"], weights=[10, 1])[0]
+ ret["throughputProbingStepMultiple"] = rng.uniform(0.1, 0.5)
+ ret["throughputProbingInitialConcurrency"] = rng.randint(4, 128)
+ ret["throughputProbingMinConcurrency"] = rng.randint(4,
+ ret["throughputProbingInitialConcurrency"])
+ ret["throughputProbingMaxConcurrency"] = rng.randint(ret["throughputProbingInitialConcurrency"],
+ 128)
+ ret["throughputProbingReadWriteRatio"] = rng.uniform(0, 1)
+ ret["throughputProbingConcurrencyMovingAverageWeight"] = 1 - rng.random()
+
+ ret["wiredTigerConcurrentWriteTransactions"] = rng.randint(5, 32)
+ ret["wiredTigerConcurrentReadTransactions"] = rng.randint(5, 32)
+ ret["wiredTigerStressConfig"] = False if mode != 'stress' else rng.choice([True, False])
+
+ # We need a higher timeout to account for test slowness
+ ret["receiveChunkWaitForRangeDeleterTimeoutMS"] = 300000
+ return ret
+
+
+def generate_mongos_parameters(rng, mode):
+ """Return a dictionary with values for each mongos parameter."""
+ ret = {}
+ ret["initialServiceExecutorUseDedicatedThread"] = rng.choice([True, False])
+ ret["opportunisticSecondaryTargeting"] = rng.choice([True, False])
+ return ret
+
+
+def fuzz_mongod_set_parameters(mode, seed, user_provided_params):
+ """Randomly generate mongod configurations and wiredTigerConnectionString."""
+ rng = random.Random(seed)
+
+ ret = {}
+ params = [generate_flow_control_parameters(rng), generate_mongod_parameters(rng, mode)]
+ for dct in params:
+ for key, value in dct.items():
+ ret[key] = value
+
+ for key, value in utils.load_yaml(user_provided_params).items():
+ ret[key] = value
+
+ return utils.dump_yaml(ret), generate_eviction_configs(rng, mode), generate_table_configs(rng), \
+ generate_table_configs(rng)
+
+
+def fuzz_mongos_set_parameters(mode, seed, user_provided_params):
+ """Randomly generate mongos configurations."""
+ rng = random.Random(seed)
+
+ ret = {}
+ params = generate_mongos_parameters(rng, mode)
+ for key, value in params.items():
+ ret[key] = value
+
+ for key, value in utils.load_yaml(user_provided_params).items():
+ ret[key] = value
+
+ return utils.dump_yaml(ret)
diff --git a/buildscripts/resmokelib/mongod_fuzzer_configs.py b/buildscripts/resmokelib/mongod_fuzzer_configs.py
deleted file mode 100644
index be00b89931d2c..0000000000000
--- a/buildscripts/resmokelib/mongod_fuzzer_configs.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""Generator functions for all parameters that we fuzz when invoked with --fuzzMongodConfigs."""
-
-import random
-from buildscripts.resmokelib import utils
-
-
-def generate_eviction_configs(rng, mode):
- """Generate random configurations for wiredTigerEngineConfigString parameter."""
- eviction_checkpoint_target = rng.randint(1, 99)
- eviction_target = rng.randint(50, 95)
- eviction_trigger = rng.randint(eviction_target + 1, 99)
-
- # Fuzz eviction_dirty_target and trigger both as relative and absolute values
- target_bytes_min = 50 * 1024 * 1024 # 50MB # 5% of 1GB default cache size on Evergreen
- target_bytes_max = 256 * 1024 * 1024 # 256MB # 1GB default cache size on Evergreen
- eviction_dirty_target = rng.choice(
- [rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max)])
- trigger_max = 75 if eviction_dirty_target <= 50 else target_bytes_max
- eviction_dirty_trigger = rng.randint(eviction_dirty_target + 1, trigger_max)
-
- assert eviction_dirty_trigger > eviction_dirty_target
- assert eviction_dirty_trigger <= trigger_max
-
- # Fuzz eviction_updates_target and eviction_updates_trigger. These are by default half the
- # values of the corresponding eviction dirty target and trigger. They need to stay less than the
- # dirty equivalents. The default updates target is 2.5% of the cache, so let's start fuzzing
- # from 2%.
- updates_target_min = 2 if eviction_dirty_target <= 100 else 20 * 1024 * 1024 # 2% of 1GB cache
- eviction_updates_target = rng.randint(updates_target_min, eviction_dirty_target - 1)
- eviction_updates_trigger = rng.randint(eviction_updates_target + 1, eviction_dirty_trigger - 1)
-
- # Fuzz File manager settings
- close_idle_time_secs = rng.randint(1, 100)
- close_handle_minimum = rng.randint(0, 1000)
- close_scan_interval = rng.randint(1, 100)
-
- # The debug_mode for WiredTiger offers some settings to change internal behavior that could help
- # find bugs. Settings to fuzz:
- # eviction - Turns aggressive eviction on/off
- # realloc_exact - Finds more memory bugs by allocating the memory for the exact size asked
- # rollback_error - Forces WiredTiger to return a rollback error every Nth call
- # slow_checkpoint - Adds internal delays in processing internal leaf pages during a checkpoint
- dbg_eviction = rng.choice(['true', 'false'])
- dbg_realloc_exact = rng.choice(['true', 'false'])
- # Rollback every Nth transaction. The values have been tuned after looking at how many
- # WiredTiger transactions happen per second for the config-fuzzed jstests.
- # The setting is trigerring bugs, disabled until they get resolved.
- # dbg_rollback_error = rng.choice([0, rng.randint(250, 1500)])
- dbg_rollback_error = 0
- dbg_slow_checkpoint = 'false' if mode != 'stress' else rng.choice(['true', 'false'])
-
- return "debug_mode=(eviction={0},realloc_exact={1},rollback_error={2}, slow_checkpoint={3}),"\
- "eviction_checkpoint_target={4},eviction_dirty_target={5},eviction_dirty_trigger={6},"\
- "eviction_target={7},eviction_trigger={8},eviction_updates_target={9},"\
- "eviction_updates_trigger={10},file_manager=(close_handle_minimum={11},"\
- "close_idle_time={12},close_scan_interval={13})".format(dbg_eviction,
- dbg_realloc_exact,
- dbg_rollback_error,
- dbg_slow_checkpoint,
- eviction_checkpoint_target,
- eviction_dirty_target,
- eviction_dirty_trigger,
- eviction_target,
- eviction_trigger,
- eviction_updates_target,
- eviction_updates_trigger,
- close_handle_minimum,
- close_idle_time_secs,
- close_scan_interval)
-
-
-def generate_table_configs(rng):
- """Generate random configurations for WiredTiger tables."""
-
- internal_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024
- leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024
- leaf_value_max = rng.choice([1, 32, 128, 256]) * 1024 * 1024
-
- memory_page_max_lower_bound = leaf_page_max
- # Assume WT cache size of 1GB as most MDB tests specify this as the cache size.
- memory_page_max_upper_bound = round(
- (rng.randint(256, 1024) * 1024 * 1024) / 10) # cache_size / 10
- memory_page_max = rng.randint(memory_page_max_lower_bound, memory_page_max_upper_bound)
-
- split_pct = rng.choice([50, 60, 75, 100])
- prefix_compression = rng.choice(["true", "false"])
- block_compressor = rng.choice(["none", "snappy", "zlib", "zstd"])
-
- return "block_compressor={0},internal_page_max={1},leaf_page_max={2},leaf_value_max={3},"\
- "memory_page_max={4},prefix_compression={5},split_pct={6}".format(block_compressor,
- internal_page_max,
- leaf_page_max,
- leaf_value_max,
- memory_page_max,
- prefix_compression,
- split_pct)
-
-
-def generate_flow_control_parameters(rng):
- """Generate parameters related to flow control and returns a dictionary."""
- configs = {}
- configs["enableFlowControl"] = rng.choice([True, False])
- if not configs["enableFlowControl"]:
- return configs
-
- configs["flowControlTargetLagSeconds"] = rng.randint(1, 1000)
- configs["flowControlThresholdLagPercentage"] = rng.random()
- configs["flowControlMaxSamples"] = rng.randint(1, 1000 * 1000)
- configs["flowControlSamplePeriod"] = rng.randint(1, 1000 * 1000)
- configs["flowControlMinTicketsPerSecond"] = rng.randint(1, 10 * 1000)
-
- return configs
-
-
-def generate_independent_parameters(rng, mode):
- """Return a dictionary with values for each independent parameter."""
- ret = {}
- ret["wiredTigerCursorCacheSize"] = rng.randint(-100, 100)
- ret["wiredTigerSessionCloseIdleTimeSecs"] = rng.randint(0, 300)
- ret["storageEngineConcurrencyAdjustmentAlgorithm"] = ""
- ret["wiredTigerConcurrentWriteTransactions"] = rng.randint(5, 32)
- ret["wiredTigerConcurrentReadTransactions"] = rng.randint(5, 32)
- ret["wiredTigerStressConfig"] = False if mode != 'stress' else rng.choice([True, False])
- if rng.choice(3 * [True] + [False]):
- # The old retryable writes format is used by other variants. Weight towards turning on the
- # new retryable writes format on in this one.
- ret["storeFindAndModifyImagesInSideCollection"] = True
- ret["syncdelay"] = rng.choice([60, rng.randint(15, 180)])
- ret["minSnapshotHistoryWindowInSeconds"] = rng.choice([300, rng.randint(5, 600)])
- # TODO (SERVER-75632): Uncomment this to enable passthrough testing.
- # ret["lockCodeSegmentsInMemory"] = rng.choice([True, False])
-
- return ret
-
-
-def fuzz_set_parameters(mode, seed, user_provided_params):
- """Randomly generate mongod configurations and wiredTigerConnectionString."""
- rng = random.Random(seed)
-
- ret = {}
- params = [generate_flow_control_parameters(rng), generate_independent_parameters(rng, mode)]
- for dct in params:
- for key, value in dct.items():
- ret[key] = value
-
- for key, value in utils.load_yaml(user_provided_params).items():
- ret[key] = value
-
- return utils.dump_yaml(ret), generate_eviction_configs(rng, mode), generate_table_configs(rng), \
- generate_table_configs(rng)
diff --git a/buildscripts/resmokelib/multiversionconstants.py b/buildscripts/resmokelib/multiversionconstants.py
index 6d499728a337c..57dbbc349150d 100644
--- a/buildscripts/resmokelib/multiversionconstants.py
+++ b/buildscripts/resmokelib/multiversionconstants.py
@@ -2,8 +2,10 @@
import os
import shutil
from subprocess import DEVNULL, STDOUT, CalledProcessError, call, check_output
+import requests
import structlog
+import buildscripts.resmokelib.config as _config
from buildscripts.resmokelib.multiversion.multiversion_service import (
MongoReleases, MongoVersion, MultiversionService, MONGO_VERSION_YAML, RELEASES_YAML)
@@ -13,6 +15,11 @@
LAST_LTS = "last_lts"
LAST_CONTINUOUS = "last_continuous"
+# We use the "releases.yml" file from "master" because it is guaranteed to be up-to-date
+# with the latest EOL versions. If a "last-continuous" version is EOL, we don't include
+# it in the multiversion config and therefore don't test against it.
+MASTER_RELEASES_FILE = "https://raw.githubusercontent.com/mongodb/mongo/master/src/mongo/util/version/releases.yml"
+
LOGGER = structlog.getLogger(__name__)
@@ -32,15 +39,20 @@ def generate_mongo_version_file():
def generate_releases_file():
"""Generate the releases constants file."""
- # Copy the 'releases.yml' file from the source tree.
- releases_yaml_path = os.path.join("src", "mongo", "util", "version", "releases.yml")
- if not os.path.isfile(releases_yaml_path):
- LOGGER.info(
- 'Skipping yml file generation because file .resmoke_mongo_release_values.yml does not exist at path {}.'
- .format(releases_yaml_path))
- return
-
- shutil.copyfile(releases_yaml_path, RELEASES_YAML)
+ try:
+ # Get the latest releases.yml from github
+ with open(RELEASES_YAML, "wb") as file:
+ file.write(requests.get(MASTER_RELEASES_FILE).content)
+ except Exception as exc:
+ LOGGER.warning(f"Could not get releases.yml file: {MASTER_RELEASES_FILE}")
+
+ # If this fails in CI we want to be aware and fix this
+ if _config.EVERGREEN_TASK_ID:
+ raise exc
+
+ # Fallback to the current releases.yml
+ releases_yaml_path = os.path.join("src", "mongo", "util", "version", "releases.yml")
+ shutil.copyfile(releases_yaml_path, RELEASES_YAML)
def in_git_root_dir():
diff --git a/buildscripts/resmokelib/parser.py b/buildscripts/resmokelib/parser.py
index e09b76ec29288..80e647a9906c3 100644
--- a/buildscripts/resmokelib/parser.py
+++ b/buildscripts/resmokelib/parser.py
@@ -11,7 +11,6 @@
from buildscripts.resmokelib.multiversion import MultiversionPlugin
from buildscripts.resmokelib.powercycle import PowercyclePlugin
from buildscripts.resmokelib.run import RunPlugin
-from buildscripts.resmokelib.symbolizer import SymbolizerPlugin
from buildscripts.resmokelib.undodb import UndoDbPlugin
_PLUGINS = [
@@ -19,7 +18,6 @@
HangAnalyzerPlugin(),
UndoDbPlugin(),
PowercyclePlugin(),
- SymbolizerPlugin(),
GenerateFCVConstantsPlugin(),
DiscoveryPlugin(),
MultiversionPlugin(),
diff --git a/buildscripts/resmokelib/run/__init__.py b/buildscripts/resmokelib/run/__init__.py
index 2afd9aa64e1a2..76b3dde383258 100644
--- a/buildscripts/resmokelib/run/__init__.py
+++ b/buildscripts/resmokelib/run/__init__.py
@@ -15,6 +15,7 @@
import pkg_resources
import psutil
+from buildscripts.ciconfig.evergreen import parse_evergreen_file
from buildscripts.resmokelib import parser as main_parser
from buildscripts.resmokelib import config
from buildscripts.resmokelib import configure_resmoke
@@ -31,6 +32,7 @@
from buildscripts.resmokelib.run import list_tags
from buildscripts.resmokelib.run.runtime_recorder import compare_start_time
from buildscripts.resmokelib.suitesconfig import get_suite_files
+from buildscripts.resmokelib.utils.dictionary import get_dict_value
_INTERNAL_OPTIONS_TITLE = "Internal Options"
_MONGODB_SERVER_OPTIONS_TITLE = "MongoDB Server Options"
@@ -259,21 +261,81 @@ def _run_suite(self, suite):
def _log_local_resmoke_invocation(self):
"""Log local resmoke invocation example."""
+
+ # Do not log local args if this is not being run in evergreen
+ if not config.EVERGREEN_TASK_ID:
+ print("Skipping local invocation because evergreen task id was not provided.")
+ return
+
+ evg_conf = parse_evergreen_file("etc/evergreen.yml")
+
+ suite = self._get_suites()[0]
+ suite_name = config.ORIGIN_SUITE or suite.get_name()
+
+ # try to find the evergreen task from the resmoke suite name
+ task = evg_conf.get_task(suite_name) or evg_conf.get_task(f"{suite_name}_gen")
+
+ multiversion_bin_version = None
+ # Some evergreen task names do not reflect the suite names they run.
+ # In that case the suite name should appear in the task's evergreen functions.
+ if task is None:
+ for current_task in evg_conf.tasks:
+ func = current_task.find_func_command("run tests") \
+ or current_task.find_func_command("generate resmoke tasks")
+ if func and get_dict_value(func, ["vars", "suite"]) == suite_name:
+ task = current_task
+ break
+
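+ # Multiversion tasks map their sub-suite names to binary versions under the "initialize multiversion tasks" function vars.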
+ func = current_task.find_func_command("initialize multiversion tasks")
+ if not func:
+ continue
+ for subtask in func["vars"]:
+ if subtask == suite_name:
+ task = current_task
+ multiversion_bin_version = func["vars"][subtask]
+ break
+
+ if task:
+ break
+
+ if task is None:
+ raise RuntimeError(f"Error: Could not find evergreen task definition for {suite_name}")
+
+ is_multiversion = "multiversion" in task.tags
+ generate_func = task.find_func_command("generate resmoke tasks")
+ is_jstestfuzz = False
+ if generate_func:
+ is_jstestfuzz = get_dict_value(generate_func, ["vars", "is_jstestfuzz"]) == "true"
+
local_args = to_local_args()
+ local_args = strip_fuzz_config_params(local_args)
local_resmoke_invocation = (
f"{os.path.join('buildscripts', 'resmoke.py')} {' '.join(local_args)}")
+ using_config_fuzzer = False
if config.FUZZ_MONGOD_CONFIGS:
- local_args = strip_fuzz_config_params(local_args)
- local_resmoke_invocation = (
- f"{os.path.join('buildscripts', 'resmoke.py')} {' '.join(local_args)}"
- f" --fuzzMongodConfigs={config.FUZZ_MONGOD_CONFIGS} --configFuzzSeed={str(config.CONFIG_FUZZ_SEED)}"
- )
+ using_config_fuzzer = True
+ local_resmoke_invocation += f" --fuzzMongodConfigs={config.FUZZ_MONGOD_CONFIGS}"
self._resmoke_logger.info("Fuzzed mongodSetParameters:\n%s",
config.MONGOD_SET_PARAMETERS)
self._resmoke_logger.info("Fuzzed wiredTigerConnectionString: %s",
config.WT_ENGINE_CONFIG)
+
+ if config.FUZZ_MONGOS_CONFIGS:
+ using_config_fuzzer = True
+ local_resmoke_invocation += f" --fuzzMongosConfigs={config.FUZZ_MONGOS_CONFIGS}"
+
+ self._resmoke_logger.info("Fuzzed mongosSetParameters:\n%s",
+ config.MONGOS_SET_PARAMETERS)
+
+ if using_config_fuzzer:
+ local_resmoke_invocation += f" --configFuzzSeed={str(config.CONFIG_FUZZ_SEED)}"
+
+ if multiversion_bin_version:
+ default_tag_file = config.DEFAULTS["exclude_tags_file_path"]
+ local_resmoke_invocation += f" --tagFile={default_tag_file}"
+
resmoke_env_options = ''
if os.path.exists('resmoke_env_options.txt'):
with open('resmoke_env_options.txt') as fin:
@@ -282,26 +344,60 @@ def _log_local_resmoke_invocation(self):
self._resmoke_logger.info("resmoke.py invocation for local usage: %s %s",
resmoke_env_options, local_resmoke_invocation)
- suite = self._get_suites()[0]
+ lines = []
+
+ if is_multiversion:
+ lines.append("# DISCLAIMER:")
+ lines.append(
+ "# The `db-contrib-tool` command downloads the latest last-continuous/lts mongo shell binaries available in CI."
+ )
+ if multiversion_bin_version:
+ lines.append(
+ "# The generated `multiversion_exclude_tags.yml` is dependent on the `backports_required_for_multiversion_tests.yml` file of the last-continuous/lts mongo shell binary git commit."
+ )
+ lines.append(
+ "# If there have been new commits to last-continuous/lts, the excluded tests & binaries may be slightly different on this task vs locally."
+ )
+ if is_jstestfuzz:
+ lines.append(
+ "# This is a jstestfuzz suite and is dependent on the generated tests specific to this task execution."
+ )
+
if suite.get_description():
- self._resmoke_logger.info("'%s' suite description:\n\n%s\n", suite.get_name(),
- suite.get_description())
+ lines.append(f"# {suite.get_description()}")
- if suite.is_matrix_suite():
- self._resmoke_logger.info(
- "This suite is a matrix suite. To view the generated matrix suite run python3 ./buildscripts/resmoke.py suiteconfig %s",
- suite.get_name())
-
- if config.EVERGREEN_TASK_ID:
- with open("local-resmoke-invocation.txt", "w") as fh:
- lines = [f"{resmoke_env_options} {local_resmoke_invocation}"]
- if suite.get_description():
- lines.append(f"{suite.get_name()}: {suite.get_description()}")
- if suite.is_matrix_suite():
- lines.append(
- f"This suite is a matrix suite. To view the generated matrix suite run python3 ./buildscripts/resmoke.py suiteconfig {suite.get_name()}"
- )
- fh.write("\n".join(lines))
+ lines.append(
+ "# Having trouble reproducing your failure with this? Feel free to reach out in #server-testing."
+ )
+ lines.append("")
+ if is_multiversion:
+ if not os.path.exists("local-db-contrib-tool-invocation.txt"):
+ raise RuntimeError(
+ "ERROR: local-db-contrib-tool-invocation.txt does not exist for multiversion task"
+ )
+
+ with open("local-db-contrib-tool-invocation.txt", "r") as fh:
+ db_contrib_tool_invocation = fh.read().strip() + " && \\"
+ lines.append(db_contrib_tool_invocation)
+
+ if multiversion_bin_version:
+ generate_tag_file_invocation = f"buildscripts/resmoke.py generate-multiversion-exclude-tags --oldBinVersion={multiversion_bin_version} && \\"
+ lines.append(generate_tag_file_invocation)
+
+ if is_jstestfuzz:
+ download_url = f"https://mciuploads.s3.amazonaws.com/{config.EVERGREEN_PROJECT_NAME}/{config.EVERGREEN_VARIANT_NAME}/{config.EVERGREEN_REVISION}/jstestfuzz/{config.EVERGREEN_TASK_ID}-{config.EVERGREEN_EXECUTION}.tgz"
+ jstestfuzz_dir = "jstestfuzz/"
+ jstests_tar = "jstests.tgz"
+ lines.append(f"mkdir -p {jstestfuzz_dir} && \\")
+ lines.append(f"rm -rf {jstestfuzz_dir}* && \\")
+ lines.append(f"wget '{download_url}' -O {jstests_tar} && \\")
+ lines.append(f"tar -xf {jstests_tar} -C {jstestfuzz_dir} && \\")
+ lines.append(f"rm {jstests_tar} && \\")
+
+ lines.append(local_resmoke_invocation)
+
+ with open("local-resmoke-invocation.txt", "w") as fh:
+ fh.write("\n".join(lines))
def _check_for_mongo_processes(self):
"""Check for existing mongo processes as they could interfere with running the tests."""
@@ -442,6 +538,9 @@ def _get_suites(self):
self._resmoke_logger.error("Failed to parse YAML suite definition: %s", str(err))
self.list_suites()
self.exit(1)
+ except errors.InvalidMatrixSuiteError as err:
+ self._resmoke_logger.error("Failed to get matrix suite: %s", str(err))
+ self.exit(1)
except errors.ResmokeError as err:
self._resmoke_logger.error(
"Cannot run excluded test in suite config. Use '--force-excluded-tests' to override: %s",
@@ -850,6 +949,9 @@ def _add_run(cls, subparsers):
parser.add_argument("--maxTestQueueSize", type=int, dest="max_test_queue_size",
help=argparse.SUPPRESS)
+ parser.add_argument("--tagFile", action="append", dest="tag_files", metavar="TAG_FILES",
+ help="One or more YAML files that associate tests and tags.")
+
mongodb_server_options = parser.add_argument_group(
title=_MONGODB_SERVER_OPTIONS_TITLE,
description=("Options related to starting a MongoDB cluster that are forwarded from"
@@ -933,18 +1035,23 @@ def _add_run(cls, subparsers):
mongodb_server_options.add_argument(
"--fuzzMongodConfigs", dest="fuzz_mongod_configs",
- help="Randomly chooses server parameters that were not specified. Use 'stress' to fuzz "
+ help="Randomly chooses mongod parameters that were not specified. Use 'stress' to fuzz "
"all configs including stressful storage configurations that may significantly "
"slow down the server. Use 'normal' to only fuzz non-stressful configurations. ",
metavar="MODE", choices=('normal', 'stress'))
- mongodb_server_options.add_argument("--configFuzzSeed", dest="config_fuzz_seed",
- metavar="PATH",
- help="Sets the seed used by storage config fuzzer")
+ mongodb_server_options.add_argument(
+ "--fuzzMongosConfigs", dest="fuzz_mongos_configs",
+ help="Randomly chooses mongos parameters that were not specified", metavar="MODE",
+ choices=('normal', ))
+
+ mongodb_server_options.add_argument(
+ "--configFuzzSeed", dest="config_fuzz_seed", metavar="PATH",
+ help="Sets the seed used by mongod and mongos config fuzzers")
mongodb_server_options.add_argument(
- "--catalogShard", dest="catalog_shard", metavar="CONFIG",
- help="If set, specifies which node is the catalog shard. Can also be set to 'any'.")
+ "--configShard", dest="config_shard", metavar="CONFIG",
+ help="If set, specifies which node is the config shard. Can also be set to 'any'.")
internal_options = parser.add_argument_group(
title=_INTERNAL_OPTIONS_TITLE,
@@ -1062,10 +1169,6 @@ def _add_run(cls, subparsers):
metavar="REVISION_ORDER_ID",
help="Sets the chronological order number of this commit.")
- evergreen_options.add_argument("--tagFile", action="append", dest="tag_files",
- metavar="TAG_FILES",
- help="One or more YAML files that associate tests and tags.")
-
evergreen_options.add_argument(
"--taskName", dest="task_name", metavar="TASK_NAME",
help="Sets the name of the Evergreen task running the tests.")
@@ -1208,6 +1311,9 @@ def to_local_args(input_args=None):
run_parser = command_subparser.choices.get("run")
+ # Arguments from the standard run parser that we do not want to include in the local invocation.
+ skipped_args = ["install_dir", "tag_files"]
+
suites_arg = None
storage_engine_arg = None
other_local_args = []
@@ -1219,7 +1325,10 @@ def format_option(option_name, option_value):
This function assumes that 'option_name' is always "--" prefix and isn't "-" prefixed.
"""
- return f"{option_name}={option_value}"
+ if " " not in str(option_value):
+ return f"{option_name}={option_value}"
+ else:
+ return f"'{option_name}={option_value}'"
# Trim the argument namespace of any args we don't want to return.
for group in run_parser._action_groups: # pylint: disable=protected-access
@@ -1245,6 +1354,8 @@ def format_option(option_name, option_value):
_INTERNAL_OPTIONS_TITLE, _EVERGREEN_ARGUMENT_TITLE, _CEDAR_ARGUMENT_TITLE
]:
continue
+ elif arg_dest in skipped_args:
+ continue
elif group.title == 'positional arguments':
positional_args.extend(arg_value)
# Keep all remaining args.
@@ -1286,7 +1397,7 @@ def strip_fuzz_config_params(input_args):
ret = []
for arg in input_args:
- if "--fuzzMongodConfigs" not in arg and "--fuzzConfigSeed" not in arg:
+ if not arg.startswith(("--fuzzMongodConfigs", "--fuzzMongosConfigs", "--configFuzzSeed")):
ret.append(arg)
return ret
diff --git a/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py b/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py
index 98be3db0fa862..1266806285c54 100755
--- a/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py
+++ b/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py
@@ -79,8 +79,7 @@ def generate_exclude_yaml(old_bin_version: str, output: str, logger: logging.Log
output = os.path.abspath(output)
location, _ = os.path.split(output)
if not os.path.isdir(location):
- logger.info(f"Cannot write to {output}. Not generating tag file.")
- return
+ os.makedirs(location)
backports_required_latest = read_yaml_file(os.path.join(ETC_DIR, BACKPORTS_REQUIRED_FILE))
diff --git a/buildscripts/resmokelib/selector.py b/buildscripts/resmokelib/selector.py
index b26ac4a374b65..332dc3891b1d2 100644
--- a/buildscripts/resmokelib/selector.py
+++ b/buildscripts/resmokelib/selector.py
@@ -126,7 +126,9 @@ def parse_tag_files(test_kind, tag_files=None, tagged_tests=None):
if tag_files is None:
tag_files = []
for tag_file in tag_files:
- if tag_file and os.path.exists(tag_file):
+ if not tag_file:
+ continue
+ if os.path.exists(tag_file):
tags_conf = _tags.TagsConfig.from_file(tag_file)
tagged_roots = tags_conf.get_test_patterns(test_kind)
for tagged_root in tagged_roots:
@@ -137,6 +139,12 @@ def parse_tag_files(test_kind, tag_files=None, tagged_tests=None):
# A test could have a tag in more than one place, due to wildcards in the
# selector.
tagged_tests[test].extend(test_tags)
+ else:
+ # TODO SERVER-77265 always validate tag file input when mongo-task-generator
+ # no longer passes in invalid tag files
+ if not config.EVERGREEN_TASK_ID:
+ raise errors.TagFileDoesNotExistError(f"A tag file was not found at {tag_file}")
+
return tagged_tests
@@ -604,6 +612,39 @@ def select(self, selector_config):
return _Selector.select(self, selector_config)
+class _PrettyPrinterTestSelectorConfig(_SelectorConfig):
+ """_SelectorConfig subclass for pretty-printer-tests."""
+
+ def __init__(self, root=config.DEFAULT_INTEGRATION_TEST_LIST, roots=None, include_files=None,
+ exclude_files=None):
+ """Initialize _PrettyPrinterTestSelectorConfig."""
+ if roots:
+ # The 'roots' argument is only present when tests are specified on the command line
+ # and in that case they take precedence over the tests in the root file.
+ _SelectorConfig.__init__(self, roots=roots, include_files=include_files,
+ exclude_files=exclude_files)
+ else:
+ _SelectorConfig.__init__(self, root=root, include_files=include_files,
+ exclude_files=exclude_files)
+
+
+class _PrettyPrinterTestSelector(_Selector):
+ """_Selector subclass for pretty-printer-tests."""
+
+ def __init__(self, test_file_explorer):
+ """Initialize _PrettyPrinterTestSelector."""
+ _Selector.__init__(self, test_file_explorer)
+
+ def select(self, selector_config):
+ """Return selected tests."""
+ if selector_config.roots:
+ # Tests have been specified on the command line. We use them without additional
+ # filtering.
+ test_list = _TestList(self._test_file_explorer, selector_config.roots)
+ return test_list.get_tests()
+ return _Selector.select(self, selector_config)
+
+
class _DbTestSelectorConfig(_SelectorConfig):
"""_Selector config subclass for db_test tests."""
@@ -715,6 +756,7 @@ def __init__(self, test_file_explorer):
_SELECTOR_REGISTRY = {
"cpp_integration_test": (_CppTestSelectorConfig, _CppTestSelector),
"cpp_unit_test": (_CppTestSelectorConfig, _CppTestSelector),
+ "pretty_printer_test": (_PrettyPrinterTestSelectorConfig, _PrettyPrinterTestSelector),
"benchmark_test": (_CppTestSelectorConfig, _CppTestSelector),
"sdam_json_test": (_FileBasedSelectorConfig, _Selector),
"server_selection_json_test": (_FileBasedSelectorConfig, _Selector),
diff --git a/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py b/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py
index 5df6e47725a3b..b26277099ed6c 100644
--- a/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py
+++ b/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py
@@ -29,25 +29,6 @@
SUBCOMMAND = "setup-multiversion"
-LOGGER = structlog.getLogger(__name__)
-
-
-def setup_logging(debug=False):
- """Enable logging."""
- log_level = logging.DEBUG if debug else logging.INFO
- logging.basicConfig(
- format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s",
- level=log_level,
- stream=sys.stdout,
- )
- logging.getLogger("urllib3").setLevel(logging.WARNING)
- logging.getLogger("s3transfer").setLevel(logging.WARNING)
- logging.getLogger("botocore").setLevel(logging.WARNING)
- logging.getLogger("boto3").setLevel(logging.WARNING)
- logging.getLogger("evergreen").setLevel(logging.WARNING)
- logging.getLogger("github").setLevel(logging.WARNING)
- structlog.configure(logger_factory=structlog.stdlib.LoggerFactory())
-
def infer_platform(edition=None, version=None):
"""Infer platform for popular OS."""
@@ -70,16 +51,16 @@ def infer_platform(edition=None, version=None):
return pltf
-def get_merge_base_commit(version: str) -> Optional[str]:
+def get_merge_base_commit(version: str, logger: logging.Logger) -> Optional[str]:
"""Get merge-base commit hash between origin/master and version."""
cmd = ["git", "merge-base", "origin/master", f"origin/v{version}"]
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
if result.returncode:
- LOGGER.warning("Git merge-base command failed. Falling back to latest master", cmd=cmd,
+ logger.warning("Git merge-base command failed. Falling back to latest master", cmd=cmd,
error=result.stderr.decode("utf-8").strip())
return None
commit_hash = result.stdout.decode("utf-8").strip()
- LOGGER.info("Found merge-base commit.", cmd=cmd, commit=commit_hash)
+ logger.info("Found merge-base commit.", cmd=cmd, commit=commit_hash)
return commit_hash
@@ -93,16 +74,31 @@ class EvgURLInfo(NamedTuple):
class SetupMultiversion(Subcommand):
"""Main class for the setup multiversion subcommand."""
- def __init__(self, download_options, install_dir="", link_dir="", mv_platform=None,
- edition=None, architecture=None, use_latest=None, versions=None, variant=None,
- install_last_lts=None, install_last_continuous=None, evergreen_config=None,
- github_oauth_token=None, debug=None, ignore_failed_push=False,
- evg_versions_file=None):
+ def __init__(
+ self,
+ download_options,
+ install_dir="",
+ link_dir="",
+ mv_platform=None,
+ edition=None,
+ architecture=None,
+ use_latest=None,
+ versions=None,
+ variant=None,
+ install_last_lts=None,
+ install_last_continuous=None,
+ evergreen_config=None,
+ github_oauth_token=None,
+ debug=None,
+ ignore_failed_push=False,
+ evg_versions_file=None,
+ logger: Optional[logging.Logger] = None,
+ ):
"""Initialize."""
- setup_logging(debug)
+
+ self.logger = logger or self.setup_logger()
self.install_dir = os.path.abspath(install_dir)
self.link_dir = os.path.abspath(link_dir)
-
self.edition = edition.lower() if edition else None
self.platform = mv_platform.lower() if mv_platform else None
self.inferred_platform = bool(self.platform is None)
@@ -132,6 +128,29 @@ def __init__(self, download_options, install_dir="", link_dir="", mv_platform=No
self._is_windows = is_windows()
self._windows_bin_install_dirs = []
+ @staticmethod
+ def setup_logger(debug=False) -> logging.Logger:
+ """
+ Set up the logger.
+
+ :param debug: Whether to enable debugging or not.
+ :return: Logger instance.
+ """
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("s3transfer").setLevel(logging.WARNING)
+ logging.getLogger("botocore").setLevel(logging.WARNING)
+ logging.getLogger("boto3").setLevel(logging.WARNING)
+ logging.getLogger("evergreen").setLevel(logging.WARNING)
+ logging.getLogger("github").setLevel(logging.WARNING)
+
+ log_level = logging.DEBUG if debug else logging.INFO
+ logger = logging.Logger("SetupMultiversion", level=log_level)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(
+ logging.Formatter(fmt="[%(asctime)s - %(name)s - %(levelname)s] %(message)s"))
+ logger.addHandler(handler)
+ return logger
+
@staticmethod
def _get_bin_suffix(version, evg_project_id):
"""Get the multiversion bin suffix from the evergreen project ID."""
@@ -146,27 +165,29 @@ def _get_bin_suffix(version, evg_project_id):
# Use the Evergreen project ID as fallback.
return re.search(r"(\d+\.\d+$)", evg_project_id).group(0)
- @staticmethod
- def _get_release_versions(install_last_lts: Optional[bool],
+ def _get_release_versions(self, install_last_lts: Optional[bool],
install_last_continuous: Optional[bool]) -> List[str]:
"""Return last-LTS and/or last-continuous versions."""
out = []
if not os.path.isfile(
os.path.join(os.getcwd(), "buildscripts", "resmokelib",
"multiversionconstants.py")):
- LOGGER.error("This command should be run from the root of the mongo repo.")
- LOGGER.error("If you're running it from the root of the mongo repo and still seeing"
- " this error, please reach out in #server-testing slack channel.")
+ self.logger.error("This command should be run from the root of the mongo repo.")
+ self.logger.error(
+ "If you're running it from the root of the mongo repo and still seeing"
+ " this error, please reach out in #server-testing slack channel.")
exit(1)
try:
import buildscripts.resmokelib.multiversionconstants as multiversionconstants
except ImportError:
- LOGGER.error("Could not import `buildscripts.resmokelib.multiversionconstants`.")
- LOGGER.error("If you're passing `--installLastLTS` and/or `--installLastContinuous`"
- " flags, this module is required to automatically calculate last-LTS"
- " and/or last-continuous versions.")
- LOGGER.error("Try omitting these flags if you don't need the automatic calculation."
- " Otherwise please reach out in #server-testing slack channel.")
+ self.logger.error("Could not import `buildscripts.resmokelib.multiversionconstants`.")
+ self.logger.error(
+ "If you're passing `--installLastLTS` and/or `--installLastContinuous`"
+ " flags, this module is required to automatically calculate last-LTS"
+ " and/or last-continuous versions.")
+ self.logger.error(
+ "Try omitting these flags if you don't need the automatic calculation."
+ " Otherwise please reach out in #server-testing slack channel.")
exit(1)
else:
releases = {
@@ -181,14 +202,15 @@ def execute(self):
"""Execute setup multiversion mongodb."""
if self.install_last_lts or self.install_last_continuous:
self.versions.extend(
- self._get_release_versions(self.install_last_lts, self.install_last_continuous))
+ self._get_release_versions(self.install_last_lts,
+ self.install_last_continuous))
self.versions = list(set(self.versions))
downloaded_versions = []
for version in self.versions:
- LOGGER.info("Setting up version.", version=version)
- LOGGER.info("Fetching download URL from Evergreen.")
+ self.logger.info("Setting up version. version=%s", version)
+ self.logger.info("Fetching download URL from Evergreen.")
try:
self.platform = infer_platform(self.edition,
@@ -197,18 +219,18 @@ def execute(self):
if self.use_latest:
urls_info = self.get_latest_urls(version)
if self.use_latest and not urls_info.urls:
- LOGGER.warning("Latest URL is not available, falling back"
- " to getting the URL from 'mongodb-mongo-master'"
- " project preceding the merge-base commit.")
- merge_base_revision = get_merge_base_commit(version)
+ self.logger.warning("Latest URL is not available, falling back"
+ " to getting the URL from 'mongodb-mongo-master'"
+ " project preceding the merge-base commit.")
+ merge_base_revision = get_merge_base_commit(version, self.logger)
urls_info = self.get_latest_urls("master", merge_base_revision)
if not urls_info.urls:
- LOGGER.warning("Latest URL is not available or not requested,"
- " falling back to getting the URL for a specific"
- " version.")
+ self.logger.warning("Latest URL is not available or not requested,"
+ " falling back to getting the URL for a specific"
+ " version.")
urls_info = self.get_urls(version, self.variant)
if not urls_info:
- LOGGER.error("URL is not available for the version.", version=version)
+ self.logger.error("URL is not available for the version. version=%s", version)
exit(1)
urls = urls_info.urls
@@ -219,21 +241,21 @@ def execute(self):
# Give each version a unique install dir
install_dir = os.path.join(self.install_dir, version)
- self.download_and_extract_from_urls(urls, bin_suffix, install_dir)
+ self.download_and_extract_from_urls(urls, bin_suffix, install_dir)
except (github_conn.GithubConnError, evergreen_conn.EvergreenConnError,
download.DownloadError) as ex:
- LOGGER.error(ex)
+ self.logger.error(ex)
exit(1)
else:
- LOGGER.info("Setup version completed.", version=version)
- LOGGER.info("-" * 50)
+ self.logger.info("Setup version completed. version=%s", version)
+ self.logger.info("-" * 50)
if self._is_windows:
- self._write_windows_install_paths(self._windows_bin_install_dirs)
+ self._write_windows_install_paths(self._windows_bin_install_dirs)
if self.evg_versions_file:
- self._write_evg_versions_file(self.evg_versions_file, downloaded_versions)
+ self._write_evg_versions_file(self.evg_versions_file, downloaded_versions)
def download_and_extract_from_urls(self, urls, bin_suffix, install_dir):
"""Download and extract values indicated in `urls`."""
@@ -269,22 +291,21 @@ def download_and_extract_from_urls(self, urls, bin_suffix, install_dir):
install_dir, bin_suffix, link_dir=self.link_dir,
install_dir_list=self._windows_bin_install_dirs)
- @staticmethod
- def _write_windows_install_paths(paths):
+ def _write_windows_install_paths(self, paths):
with open(config.WINDOWS_BIN_PATHS_FILE, "a") as out:
if os.stat(config.WINDOWS_BIN_PATHS_FILE).st_size > 0:
out.write(os.pathsep)
out.write(os.pathsep.join(paths))
- LOGGER.info(f"Finished writing binary paths on Windows to {config.WINDOWS_BIN_PATHS_FILE}")
+ self.logger.info("Finished writing binary paths on Windows to %s",
+ config.WINDOWS_BIN_PATHS_FILE)
- @staticmethod
- def _write_evg_versions_file(file_name: str, versions: List[str]):
+ def _write_evg_versions_file(self, file_name: str, versions: List[str]):
with open(file_name, "a") as out:
out.write("\n".join(versions))
- LOGGER.info(
- f"Finished writing downloaded Evergreen versions to {os.path.abspath(file_name)}")
+ self.logger.info("Finished writing downloaded Evergreen versions to %s",
+ os.path.abspath(file_name))
def get_latest_urls(self, version: str,
start_from_revision: Optional[str] = None) -> EvgURLInfo:
@@ -308,14 +329,14 @@ def get_latest_urls(self, version: str,
return EvgURLInfo()
buildvariant_name = self.get_buildvariant_name(version)
- LOGGER.debug("Found buildvariant.", buildvariant_name=buildvariant_name)
+ self.logger.debug("Found buildvariant. buildvariant_name=%s", buildvariant_name)
found_start_revision = start_from_revision is None
for evg_version in chain(iter([evg_version]), evg_versions):
# Skip all versions until we get the revision we should start looking from
if found_start_revision is False and evg_version.revision != start_from_revision:
- LOGGER.warning("Skipping evergreen version.", evg_version=evg_version)
+ self.logger.warning("Skipping evergreen version. evg_version=%s", evg_version)
continue
else:
found_start_revision = True
@@ -341,14 +362,15 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg
if evg_version is None:
git_tag, commit_hash = github_conn.get_git_tag_and_commit(self.github_oauth_token,
version)
- LOGGER.info("Found git attributes.", git_tag=git_tag, commit_hash=commit_hash)
+ self.logger.info("Found git attributes. git_tag=%s, commit_hash=%s", git_tag,
+ commit_hash)
evg_version = evergreen_conn.get_evergreen_version(self.evg_api, commit_hash)
if evg_version is None:
return EvgURLInfo()
if not buildvariant_name:
evg_project = evg_version.project_identifier
- LOGGER.debug("Found evergreen project.", evergreen_project=evg_project)
+ self.logger.debug("Found evergreen project. evergreen_project=%s", evg_project)
try:
major_minor_version = re.findall(r"\d+\.\d+", evg_project)[-1]
@@ -356,7 +378,7 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg
major_minor_version = "master"
buildvariant_name = self.get_buildvariant_name(major_minor_version)
- LOGGER.debug("Found buildvariant.", buildvariant_name=buildvariant_name)
+ self.logger.debug("Found buildvariant. buildvariant_name=%s", buildvariant_name)
if buildvariant_name not in evg_version.build_variants_map:
raise ValueError(
@@ -369,8 +391,7 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg
return EvgURLInfo(urls=urls, evg_version_id=evg_version.version_id)
- @staticmethod
- def setup_mongodb(artifacts_url, binaries_url, symbols_url, python_venv_url, install_dir,
+ def setup_mongodb(self, artifacts_url, binaries_url, symbols_url, python_venv_url, install_dir,
bin_suffix=None, link_dir=None, install_dir_list=None):
"""Download, extract and symlink."""
@@ -385,8 +406,8 @@ def try_download(download_url):
try:
try_download(url)
except Exception as err: # pylint: disable=broad-except
- LOGGER.warning("Setting up tarball failed with error, retrying once...",
- error=err)
+ self.logger.warning(
+ "Setting up tarball failed with error, retrying once... error=%s", err)
time.sleep(1)
try_download(url)
@@ -397,7 +418,7 @@ def try_download(download_url):
if not is_windows():
link_dir = download.symlink_version(bin_suffix, install_dir, link_dir)
else:
- LOGGER.info(
+ self.logger.info(
"Linking to install_dir on Windows; executable have to live in different working"
" directories to avoid DLLs for different versions clobbering each other")
link_dir = download.symlink_version(bin_suffix, install_dir, None)
@@ -450,7 +471,7 @@ def parse(self, subcommand, parser, parsed_args, **kwargs):
install_last_continuous=args.install_last_continuous, download_options=download_options,
evergreen_config=args.evergreen_config, github_oauth_token=args.github_oauth_token,
ignore_failed_push=(not args.require_push), evg_versions_file=args.evg_versions_file,
- debug=args.debug)
+ debug=args.debug, logger=SetupMultiversion.setup_logger(parsed_args.debug))
@classmethod
def _add_args_to_parser(cls, parser):
diff --git a/buildscripts/resmokelib/sighandler.py b/buildscripts/resmokelib/sighandler.py
index 5df67812d06cc..609ea8a6e3e5f 100644
--- a/buildscripts/resmokelib/sighandler.py
+++ b/buildscripts/resmokelib/sighandler.py
@@ -10,10 +10,10 @@
import psutil
+from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED
from buildscripts.resmokelib import reportfile
from buildscripts.resmokelib import testing
from buildscripts.resmokelib import config
-from buildscripts.resmokelib.hang_analyzer import hang_analyzer
from buildscripts.resmokelib import parser
_IS_WINDOWS = (sys.platform == "win32")
@@ -32,8 +32,8 @@ def _handle_sigusr1(signum, frame): # pylint: disable=unused-argument
log suite summaries.
"""
+ HANG_ANALYZER_CALLED.set()
header_msg = "Dumping stacks due to SIGUSR1 signal"
-
_dump_and_log(header_msg)
def _handle_set_event(event_handle):
@@ -53,6 +53,7 @@ def _handle_set_event(event_handle):
except win32event.error as err:
logger.error("Exception from win32event.WaitForSingleObject with error: %s" % err)
else:
+ HANG_ANALYZER_CALLED.set()
header_msg = "Dumping stacks due to signal from win32event.SetEvent"
_dump_and_log(header_msg)
@@ -159,4 +160,26 @@ def _analyze_pids(logger, pids):
if not os.getenv('ASAN_OPTIONS'):
hang_analyzer_args.append('-c')
_hang_analyzer = parser.parse_command_line(hang_analyzer_args, logger=logger)
- _hang_analyzer.execute()
+
+ # Evergreen has a 15-minute timeout for task timeout commands.
+ # Limit the hang analyzer to 12 minutes so there is time for other tasks.
+ hang_analyzer_hard_timeout = None
+ if config.EVERGREEN_TASK_ID:
+ hang_analyzer_hard_timeout = 60 * 12
+ logger.info(
+ "Limit the resmoke invoked hang analyzer to 12 minutes so there is time for resmoke to finish up."
+ )
+
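+ # Run the hang analyzer in a daemon thread so the hard timeout can be enforced with join().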
+ hang_analyzer_thread = threading.Thread(target=_hang_analyzer.execute, daemon=True)
+ hang_analyzer_thread.start()
+ hang_analyzer_thread.join(hang_analyzer_hard_timeout)
+
+ if hang_analyzer_thread.is_alive():
+ logger.warning(
+ "Resmoke invoked hang analyzer thread did not finish, but will continue running in the background. The thread may be disruputed and may show extraneous output."
+ )
+ logger.warning("Cleaning up resmoke child processes so that resmoke can fail gracefully.")
+ _hang_analyzer.kill_rogue_processes()
+
+ else:
+ logger.info("Done running resmoke invoked hang analyzer thread.")
diff --git a/buildscripts/resmokelib/suitesconfig.py b/buildscripts/resmokelib/suitesconfig.py
index 0fe935b05aaf1..a6ea326288b95 100644
--- a/buildscripts/resmokelib/suitesconfig.py
+++ b/buildscripts/resmokelib/suitesconfig.py
@@ -2,6 +2,7 @@
import collections
import copy
import os
+import pathlib
from threading import Lock
from typing import Dict, List
@@ -128,7 +129,7 @@ def _make_suite_roots(files):
def _get_suite_config(suite_name_or_path):
"""Attempt to read YAML configuration from 'suite_path' for the suite."""
- return SuiteFinder.get_config_obj(suite_name_or_path)
+ return SuiteFinder.get_config_obj_no_verify(suite_name_or_path)
def generate():
@@ -139,7 +140,7 @@ class SuiteConfigInterface:
"""Interface for suite configs."""
@classmethod
- def get_config_obj(cls, suite_name):
+ def get_config_obj_no_verify(cls, suite_name):
"""Get the config object given the suite name, which can be a path."""
pass
@@ -161,7 +162,7 @@ class ExplicitSuiteConfig(SuiteConfigInterface):
_named_suites = {}
@classmethod
- def get_config_obj(cls, suite_name):
+ def get_config_obj_no_verify(cls, suite_name):
"""Get the suite config object in the given file."""
if suite_name in cls.get_named_suites():
# Check if is a named suite first for efficiency.
@@ -226,36 +227,39 @@ def get_suites_dir():
return os.path.join(_config.CONFIG_DIR, "matrix_suites")
@classmethod
- def get_config_obj(cls, suite_name):
+ def get_config_obj_and_verify(cls, suite_name):
"""Get the suite config object in the given file and verify it matches the generated file."""
- config = cls._get_config_obj_no_verify(suite_name)
+ config = cls.get_config_obj_no_verify(suite_name)
if not config:
return None
- # TODO: SERVER-75688 add validation back
- # generated_path = cls.get_generated_suite_path(suite_name)
- # if not os.path.exists(generated_path):
- # raise errors.InvalidMatrixSuiteError(
- # f"No generated suite file was found for {suite_name}" +
- # "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`"
- # )
-
- # new_text = cls.generate_matrix_suite_text(suite_name)
- # with open(generated_path, "r") as file:
- # old_text = file.read()
- # if new_text != old_text:
- # raise errors.InvalidMatrixSuiteError(
- # f"The generated file found on disk did not match the mapping file for {suite_name}. "
- # +
- # "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`"
- # )
+ generated_path = cls.get_generated_suite_path(suite_name)
+ if not os.path.exists(generated_path):
+ raise errors.InvalidMatrixSuiteError(
+ f"No generated suite file was found for {suite_name}" +
+ "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`"
+ )
+
+ new_text = cls.generate_matrix_suite_text(suite_name)
+ with open(generated_path, "r") as file:
+ old_text = file.read()
+ if new_text != old_text:
+ loggers.ROOT_EXECUTOR_LOGGER.error("Generated file on disk:")
+ loggers.ROOT_EXECUTOR_LOGGER.error(old_text)
+ loggers.ROOT_EXECUTOR_LOGGER.error("Generated text from mapping file:")
+ loggers.ROOT_EXECUTOR_LOGGER.error(new_text)
+ raise errors.InvalidMatrixSuiteError(
+ f"The generated file found on disk did not match the mapping file for {suite_name}. "
+ +
+ "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`"
+ )
return config
@classmethod
- def _get_config_obj_no_verify(cls, suite_name):
+ def get_config_obj_no_verify(cls, suite_name):
"""Get the suite config object in the given file."""
suites_dir = cls.get_suites_dir()
matrix_suite = cls.parse_mappings_file(suites_dir, suite_name)
@@ -275,7 +279,7 @@ def process_overrides(cls, suite, overrides, suite_name):
eval_names = suite.get("eval", None)
description = suite.get("description")
- base_suite = ExplicitSuiteConfig.get_config_obj(base_suite_name)
+ base_suite = ExplicitSuiteConfig.get_config_obj_no_verify(base_suite_name)
if base_suite is None:
raise ValueError(f"Unknown base suite {base_suite_name} for matrix suite {suite_name}")
@@ -412,12 +416,14 @@ def generate_matrix_suite_text(cls, suite_name):
if os.path.exists(path):
mapping_path = path
- matrix_suite = cls._get_config_obj_no_verify(suite_name)
+ matrix_suite = cls.get_config_obj_no_verify(suite_name)
if not matrix_suite or not mapping_path:
print(f"Could not find mappings file for {suite_name}")
return None
+ # This path needs to output the same text on both Windows and Linux/macOS.
+ mapping_path = pathlib.PurePath(mapping_path)
yml = yaml.safe_dump(matrix_suite)
comments = [
"##########################################################",
@@ -425,7 +431,7 @@ def generate_matrix_suite_text(cls, suite_name):
"# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE",
"# AND REGENERATE THE MATRIX SUITES.",
"#",
- f"# matrix suite mapping file: {mapping_path}",
+ f"# matrix suite mapping file: {mapping_path.as_posix()}",
"# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites",
"##########################################################",
]
@@ -451,10 +457,10 @@ class SuiteFinder(object):
"""Utility/Factory class for getting polymorphic suite classes given a directory."""
@staticmethod
- def get_config_obj(suite_path):
+ def get_config_obj_no_verify(suite_path):
"""Get the suite config object in the given file."""
- explicit_suite = ExplicitSuiteConfig.get_config_obj(suite_path)
- matrix_suite = MatrixSuiteConfig.get_config_obj(suite_path)
+ explicit_suite = ExplicitSuiteConfig.get_config_obj_no_verify(suite_path)
+ matrix_suite = MatrixSuiteConfig.get_config_obj_no_verify(suite_path)
if not (explicit_suite or matrix_suite):
raise errors.SuiteNotFound("Unknown suite '%s'" % suite_path)
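Earlier in this file's diff, generate_matrix_suite_text normalizes the mapping-file path with pathlib before embedding it in the generated header, so the output is byte-identical across platforms. A small sketch of why as_posix() achieves that; the path literal is illustrative:

from pathlib import PurePosixPath, PureWindowsPath

# On Windows the native flavor uses backslashes, which as_posix() rewrites to forward slashes.
assert PureWindowsPath("buildscripts\\resmokeconfig\\suite.yml").as_posix() == \
    "buildscripts/resmokeconfig/suite.yml"

# On Linux/macOS the path already uses forward slashes, so as_posix() leaves it unchanged.
assert PurePosixPath("buildscripts/resmokeconfig/suite.yml").as_posix() == \
    "buildscripts/resmokeconfig/suite.yml"
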
diff --git a/buildscripts/resmokelib/symbolizer/__init__.py b/buildscripts/resmokelib/symbolizer/__init__.py
index 4381fa97a88bb..60acf7f8cd96a 100644
--- a/buildscripts/resmokelib/symbolizer/__init__.py
+++ b/buildscripts/resmokelib/symbolizer/__init__.py
@@ -115,6 +115,7 @@ def _get_compile_artifacts(self):
urlinfo = self.multiversion_setup.get_urls(version=version_id,
buildvariant_name=buildvariant_name)
+ self.logger.info("Found urls to download and extract %s", urlinfo.urls)
self.multiversion_setup.download_and_extract_from_urls(urlinfo.urls, bin_suffix=None,
install_dir=self.dest_dir)
diff --git a/buildscripts/resmokelib/testing/fixtures/_builder.py b/buildscripts/resmokelib/testing/fixtures/_builder.py
index 429d2253eaa03..7fa3fe7543481 100644
--- a/buildscripts/resmokelib/testing/fixtures/_builder.py
+++ b/buildscripts/resmokelib/testing/fixtures/_builder.py
@@ -1,13 +1,9 @@
"""Utilities for constructing fixtures that may span multiple versions."""
-import io
import logging
-import os
import threading
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Type
-from git import Repo
-
import buildscripts.resmokelib.config as config
import buildscripts.resmokelib.utils.registry as registry
from buildscripts.resmokelib import errors
@@ -24,12 +20,11 @@
FIXTURE_DIR = "buildscripts/resmokelib/testing/fixtures"
RETRIEVE_DIR = "build/multiversionfixtures"
RETRIEVE_LOCK = threading.Lock()
-MULTIVERSION_CLASS_SUFFIX = "_multiversion_class_suffix"
_BUILDERS = {} # type: ignore
-def make_fixture(class_name, logger, job_num, *args, **kwargs):
+def make_fixture(class_name, logger, job_num, *args, enable_feature_flags=True, **kwargs):
"""Provide factory function for creating Fixture instances."""
fixturelib = FixtureLib()
@@ -43,7 +38,7 @@ def make_fixture(class_name, logger, job_num, *args, **kwargs):
# Special case MongoDFixture or _MongosFixture for now since we only add one option.
# If there's more logic, we should add a builder class for them.
- if class_name in ["MongoDFixture", "_MongoSFixture"]:
+ if class_name in ["MongoDFixture", "_MongoSFixture"] and enable_feature_flags:
return _FIXTURES[class_name](logger, job_num, fixturelib, *args,
add_feature_flags=bool(config.ENABLED_FEATURE_FLAGS), **kwargs)
@@ -162,7 +157,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F
self._mutate_kwargs(kwargs)
mixed_bin_versions, old_bin_version = _extract_multiversion_options(kwargs)
self._validate_multiversion_options(kwargs, mixed_bin_versions)
- mongod_classes, mongod_executables, mongod_binary_versions = self._get_mongod_assets(
+ mongod_class, mongod_executables, mongod_binary_versions = self._get_mongod_assets(
kwargs, mixed_bin_versions, old_bin_version)
replset = _FIXTURES[self.REGISTERED_NAME](logger, job_num, fixturelib, *args, **kwargs)
@@ -180,7 +175,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F
return replset
for node_index in range(replset.num_nodes):
- node = self._new_mongod(replset, node_index, mongod_executables, mongod_classes,
+ node = self._new_mongod(replset, node_index, mongod_executables, mongod_class,
mongod_binary_versions[node_index], is_multiversion)
replset.install_mongod(node)
@@ -188,7 +183,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F
if not replset.initial_sync_node:
replset.initial_sync_node_idx = replset.num_nodes
replset.initial_sync_node = self._new_mongod(replset, replset.initial_sync_node_idx,
- mongod_executables, mongod_classes,
+ mongod_executables, mongod_class,
BinVersionEnum.NEW, is_multiversion)
return replset
@@ -240,19 +235,13 @@ def _get_mongod_assets(
and the list of binary versions
"""
executables = {BinVersionEnum.NEW: kwargs["mongod_executable"]}
- classes = {BinVersionEnum.NEW: cls.LATEST_MONGOD_CLASS}
+ _class = cls.LATEST_MONGOD_CLASS
# Default to NEW for all bin versions; may be overridden below.
binary_versions = [BinVersionEnum.NEW for _ in range(kwargs["num_nodes"])]
if mixed_bin_versions is not None:
from buildscripts.resmokelib import multiversionconstants
- old_shell_version = {
- config.MultiversionOptions.LAST_LTS:
- multiversionconstants.LAST_LTS_MONGO_BINARY,
- config.MultiversionOptions.LAST_CONTINUOUS:
- multiversionconstants.LAST_CONTINUOUS_MONGO_BINARY,
- }[old_bin_version]
old_mongod_version = {
config.MultiversionOptions.LAST_LTS:
@@ -262,13 +251,9 @@ def _get_mongod_assets(
}[old_bin_version]
executables[BinVersionEnum.OLD] = old_mongod_version
- classes[BinVersionEnum.OLD] = f"{cls.LATEST_MONGOD_CLASS}{MULTIVERSION_CLASS_SUFFIX}"
binary_versions = [x for x in mixed_bin_versions]
- load_version(version_path_suffix=MULTIVERSION_CLASS_SUFFIX,
- shell_path=old_shell_version)
-
- return classes, executables, binary_versions
+ return _class, executables, binary_versions
@staticmethod
def _get_fcv(is_multiversion: bool, old_bin_version: Optional[str]) -> str:
@@ -293,7 +278,7 @@ def _get_fcv(is_multiversion: bool, old_bin_version: Optional[str]) -> str:
@staticmethod
def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int,
- executables: Dict[str, str], classes: Dict[str, str], cur_version: str,
+ executables: Dict[str, str], _class: str, cur_version: str,
is_multiversion: bool) -> FixtureContainer:
"""Make a fixture container with configured mongod fixture(s) in it.
@@ -303,7 +288,7 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int,
:param replset: replica set fixture
:param replset_node_index: the index of node in replica set
:param executables: dict with a new and the old (if multiversion) mongod executable names
- :param classes: dict with a new and the old (if multiversion) mongod fixture names
+ :param _class: str with the mongod fixture name
:param cur_version: old or new version
:param is_multiversion: whether we are in multiversion mode
:return: fixture container with configured mongod fixture(s) in it
@@ -315,10 +300,11 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int,
old_fixture = None
if is_multiversion:
- old_fixture = make_fixture(classes[BinVersionEnum.OLD], mongod_logger, replset.job_num,
- mongod_executable=executables[BinVersionEnum.OLD],
- mongod_options=mongod_options,
- preserve_dbpath=replset.preserve_dbpath)
+ # We do not run old versions with feature flags enabled
+ old_fixture = make_fixture(
+ _class, mongod_logger, replset.job_num, enable_feature_flags=False,
+ mongod_executable=executables[BinVersionEnum.OLD], mongod_options=mongod_options,
+ preserve_dbpath=replset.preserve_dbpath)
# Assign the same port for old and new fixtures so upgrade/downgrade can be done without
# changing the replicaset config.
@@ -326,7 +312,7 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int,
new_fixture_mongod_options = replset.get_options_for_mongod(replset_node_index)
- new_fixture = make_fixture(classes[BinVersionEnum.NEW], mongod_logger, replset.job_num,
+ new_fixture = make_fixture(_class, mongod_logger, replset.job_num,
mongod_executable=executables[BinVersionEnum.NEW],
mongod_options=new_fixture_mongod_options,
preserve_dbpath=replset.preserve_dbpath, port=new_fixture_port)
@@ -343,40 +329,6 @@ def get_package_name(dir_path: str) -> str:
return dir_path.replace('/', '.').replace("\\", ".")
-def load_version(version_path_suffix=None, shell_path=None):
- """Load the last_lts/last_continuous fixtures."""
- with RETRIEVE_LOCK, registry.suffix(version_path_suffix):
- # Only one thread needs to retrieve the fixtures.
- retrieve_dir = os.path.relpath(os.path.join(RETRIEVE_DIR, version_path_suffix))
- if not os.path.exists(retrieve_dir):
- try:
- # Avoid circular import
- import buildscripts.resmokelib.run.generate_multiversion_exclude_tags as gen_tests
- commit = gen_tests.get_backports_required_hash_for_shell_version(
- mongo_shell_path=shell_path)
- except FileNotFoundError as err:
- print("Error running the mongo shell, please ensure it's in your $PATH: ", err)
- raise
- retrieve_fixtures(retrieve_dir, commit)
-
- package_name = get_package_name(retrieve_dir)
- autoloader.load_all_modules(name=package_name, path=[retrieve_dir]) # type: ignore
-
-
-def retrieve_fixtures(directory, commit):
- """Populate a directory with the fixture files corresponding to a commit."""
- repo = Repo(MONGO_REPO_LOCATION)
- real_commit = repo.commit(commit)
- tree = real_commit.tree / FIXTURE_DIR
-
- os.makedirs(directory, exist_ok=True)
-
- for blob in tree.blobs:
- output = os.path.join(directory, blob.name)
- with io.BytesIO(blob.data_stream.read()) as retrieved, open(output, "w") as file:
- file.write(retrieved.read().decode("utf-8"))
-
-
class ShardedClusterBuilder(FixtureBuilder):
"""Builder class for sharded cluster fixtures."""
@@ -395,8 +347,8 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F
self._mutate_kwargs(kwargs)
mixed_bin_versions, old_bin_version = _extract_multiversion_options(kwargs)
self._validate_multiversion_options(kwargs, mixed_bin_versions)
- mongos_classes, mongos_executables = self._get_mongos_assets(kwargs, mixed_bin_versions,
- old_bin_version)
+ mongos_class, mongos_executables = self._get_mongos_assets(kwargs, mixed_bin_versions,
+ old_bin_version)
sharded_cluster = _FIXTURES[self.REGISTERED_NAME](logger, job_num, fixturelib, *args,
**kwargs)
@@ -408,16 +360,16 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F
rs_shard_index, kwargs["num_rs_nodes_per_shard"])
sharded_cluster.install_rs_shard(rs_shard)
- catalog_shard = kwargs["catalog_shard"]
+ config_shard = kwargs["config_shard"]
config_svr = None
- if catalog_shard is None:
+ if config_shard is None:
config_svr = self._new_configsvr(sharded_cluster, is_multiversion, old_bin_version)
else:
- config_svr = sharded_cluster.shards[catalog_shard]
+ config_svr = sharded_cluster.shards[config_shard]
sharded_cluster.install_configsvr(config_svr)
for mongos_index in range(kwargs["num_mongos"]):
- mongos = self._new_mongos(sharded_cluster, mongos_executables, mongos_classes,
+ mongos = self._new_mongos(sharded_cluster, mongos_executables, mongos_class,
mongos_index, kwargs["num_mongos"], is_multiversion)
sharded_cluster.install_mongos(mongos)
@@ -445,9 +397,9 @@ def _mutate_kwargs(kwargs: Dict[str, Any]) -> None:
config.DEFAULT_MONGOS_EXECUTABLE)
kwargs["mongos_executable"] = mongos_executable
- catalog_shard = pick_catalog_shard_node(
- kwargs.pop("catalog_shard", config.CATALOG_SHARD), num_shards)
- kwargs["catalog_shard"] = catalog_shard
+ config_shard = pick_catalog_shard_node(
+ kwargs.pop("config_shard", config.CONFIG_SHARD), num_shards)
+ kwargs["config_shard"] = config_shard
@staticmethod
def _validate_multiversion_options(kwargs: Dict[str, Any],
@@ -478,16 +430,10 @@ def _get_mongos_assets(cls, kwargs: Dict[str, Any], mixed_bin_versions: Optional
"""
executables = {BinVersionEnum.NEW: kwargs["mongos_executable"]}
- classes = {BinVersionEnum.NEW: cls.LATEST_MONGOS_CLASS}
+ _class = cls.LATEST_MONGOS_CLASS
if mixed_bin_versions is not None:
from buildscripts.resmokelib import multiversionconstants
- old_shell_version = {
- config.MultiversionOptions.LAST_LTS:
- multiversionconstants.LAST_LTS_MONGO_BINARY,
- config.MultiversionOptions.LAST_CONTINUOUS:
- multiversionconstants.LAST_CONTINUOUS_MONGO_BINARY,
- }[old_bin_version]
old_mongos_version = {
config.MultiversionOptions.LAST_LTS:
@@ -497,12 +443,7 @@ def _get_mongos_assets(cls, kwargs: Dict[str, Any], mixed_bin_versions: Optional
}[old_bin_version]
executables[BinVersionEnum.OLD] = old_mongos_version
- classes[BinVersionEnum.OLD] = f"{cls.LATEST_MONGOS_CLASS}{MULTIVERSION_CLASS_SUFFIX}"
-
- load_version(version_path_suffix=MULTIVERSION_CLASS_SUFFIX,
- shell_path=old_shell_version)
-
- return classes, executables
+ return _class, executables
@staticmethod
def _new_configsvr(sharded_cluster: ShardedClusterFixture, is_multiversion: bool,
@@ -556,7 +497,7 @@ def _new_rs_shard(sharded_cluster: ShardedClusterFixture,
@staticmethod
def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, str],
- classes: Dict[str, str], mongos_index: int, total: int,
+ _class: str, mongos_index: int, total: int,
is_multiversion: bool) -> FixtureContainer:
"""Make a fixture container with configured mongos fixture(s) in it.
@@ -565,7 +506,7 @@ def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, s
:param sharded_cluster: sharded cluster fixture we are configuring mongos for
:param executables: dict with a new and the old (if multiversion) mongos executable names
- :param classes: dict with a new and the old (if multiversion) mongos fixture names
+ :param _class: str with the mongos fixture name
:param mongos_index: the index of mongos
:param total: total number of mongos
:param is_multiversion: whether we are in multiversion mode
@@ -578,15 +519,16 @@ def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, s
old_fixture = None
if is_multiversion:
+ # We do not run old versions with feature flags enabled
old_fixture = make_fixture(
- classes[BinVersionEnum.OLD], mongos_logger, sharded_cluster.job_num,
+ _class, mongos_logger, sharded_cluster.job_num, enable_feature_flags=False,
mongos_executable=executables[BinVersionEnum.OLD], **mongos_kwargs)
# We can't restart mongos since explicit ports are not supported.
new_fixture_mongos_kwargs = sharded_cluster.get_mongos_kwargs()
- new_fixture = make_fixture(
- classes[BinVersionEnum.NEW], mongos_logger, sharded_cluster.job_num,
- mongos_executable=executables[BinVersionEnum.NEW], **new_fixture_mongos_kwargs)
+ new_fixture = make_fixture(_class, mongos_logger, sharded_cluster.job_num,
+ mongos_executable=executables[BinVersionEnum.NEW],
+ **new_fixture_mongos_kwargs)
# Always spin up an old mongos if in multiversion mode given mongos is the last thing in the update path.
return FixtureContainer(new_fixture, old_fixture,
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 61e7d01d7fc57..9ea3d2fa0a6fe 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -47,7 +47,7 @@ def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_o
replset_config_options=None, voting_secondaries=True, all_nodes_electable=False,
use_replica_set_connection_string=None, linear_chain=False,
default_read_concern=None, default_write_concern=None, shard_logging_prefix=None,
- replicaset_logging_prefix=None, replset_name=None):
+ replicaset_logging_prefix=None, replset_name=None, config_shard=None):
"""Initialize ReplicaSetFixture."""
interface.ReplFixture.__init__(self, logger, job_num, fixturelib,
@@ -110,6 +110,7 @@ def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_o
self.replset_name = self.mongod_options.setdefault("replSet", "rs")
self.initial_sync_node = None
self.initial_sync_node_idx = -1
+ self.config_shard = config_shard
def setup(self):
"""Set up the replica set."""
diff --git a/buildscripts/resmokelib/testing/fixtures/shard_split.py b/buildscripts/resmokelib/testing/fixtures/shard_split.py
index 5be3e390efa0e..e707254b011df 100644
--- a/buildscripts/resmokelib/testing/fixtures/shard_split.py
+++ b/buildscripts/resmokelib/testing/fixtures/shard_split.py
@@ -247,44 +247,46 @@ def add_recipient_nodes(self, recipient_set_name, recipient_tag_name=None):
# Reconfig the donor to add the recipient nodes as non-voting members
donor_client = self._create_client(self.get_donor_rs())
- repl_config = with_naive_retry(lambda: donor_client.admin.command({"replSetGetConfig": 1})[
- "config"])
- repl_members = repl_config["members"]
-
- for recipient_node in recipient_nodes:
- # It is possible for the reconfig below to fail with a retryable error code like
- # 'InterruptedDueToReplStateChange'. In these cases, we need to run the reconfig
- # again, but some or all of the recipient nodes might have already been added to
- # the member list. Only add recipient nodes which have not yet been added on a
- # retry.
- recipient_host = recipient_node.get_internal_connection_string()
- recipient_entry = {
- "host": recipient_host, "votes": 0, "priority": 0, "hidden": True,
- "tags": {recipient_tag_name: str(ObjectId())}
- }
- member_exists = False
- for index, member in enumerate(repl_members):
- if member["host"] == recipient_host:
- repl_members[index] = recipient_entry
- member_exists = True
-
- if not member_exists:
- repl_members.append(recipient_entry)
-
- # Re-index all members from 0
- for idx, member in enumerate(repl_members):
- member["_id"] = idx
-
- # Prepare the new config
- repl_config["version"] = repl_config["version"] + 1
- repl_config["members"] = repl_members
-
- self.logger.info(
- f"Reconfiguring donor replica set to add non-voting recipient nodes: {repl_config}")
- with_naive_retry(lambda: donor_client.admin.command({
- "replSetReconfig": repl_config, "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
- }))
-
+ def reconfig_add_node_rs(client):
+ repl_config = client.admin.command({"replSetGetConfig": 1})["config"]
+ repl_members = repl_config["members"]
+
+ for recipient_node in recipient_nodes:
+ # It is possible for the reconfig below to fail with a retryable error code like
+ # 'InterruptedDueToReplStateChange'. In these cases, we need to run the reconfig
+ # again, but some or all of the recipient nodes might have already been added to
+ # the member list. Only add recipient nodes which have not yet been added on a
+ # retry.
+ recipient_host = recipient_node.get_internal_connection_string()
+ recipient_entry = {
+ "host": recipient_host, "votes": 0, "priority": 0, "hidden": True,
+ "tags": {recipient_tag_name: str(ObjectId())}
+ }
+ member_exists = False
+ for index, member in enumerate(repl_members):
+ if member["host"] == recipient_host:
+ repl_members[index] = recipient_entry
+ member_exists = True
+
+ if not member_exists:
+ repl_members.append(recipient_entry)
+
+ # Re-index all members from 0
+ for idx, member in enumerate(repl_members):
+ member["_id"] = idx
+
+ # Prepare the new config
+ repl_config["version"] = repl_config["version"] + 1
+ repl_config["members"] = repl_members
+
+ self.logger.info(
+ f"Reconfiguring donor replica set to add non-voting recipient nodes: {repl_config}")
+ client.admin.command({
+ "replSetReconfig": repl_config,
+ "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
+ })
+
+ with_naive_retry(lambda: reconfig_add_node_rs(donor_client))
# Wait for recipient nodes to become secondaries
self._await_recipient_nodes()
@@ -334,30 +336,37 @@ def remove_recipient_nodes(self, recipient_tag_name=None):
self.fixtures = [donor_rs]
donor_client = self._create_client(self.get_donor_rs())
- repl_config = with_naive_retry(lambda: donor_client.admin.command({"replSetGetConfig": 1})[
- "config"])
- repl_members = [
- member for member in repl_config["members"]
- if not 'tags' in member or not recipient_tag_name in member["tags"]
- ]
- # Re-index all members from 0
- for idx, member in enumerate(repl_members):
- member["_id"] = idx
-
- # Prepare the new config
- repl_config["version"] = repl_config["version"] + 1
- repl_config["members"] = repl_members
-
- # It's possible that the recipient config has been removed in a previous remove attempt.
- if "recipientConfig" in repl_config:
- del repl_config["recipientConfig"]
-
- self.logger.info(
- f"Reconfiguring donor '{donor_rs_name}' to remove recipient nodes: {repl_config}")
- with_naive_retry(lambda: donor_client.admin.command({
- "replSetReconfig": repl_config, "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
- }))
+ def reconfig_rs(client):
+ repl_config = client.admin.command({"replSetGetConfig": 1})["config"]
+ repl_members = [
+ member for member in repl_config["members"]
+ if not 'tags' in member or not recipient_tag_name in member["tags"]
+ ]
+
+ if "recipientConfig" in repl_config:
+ del repl_config["recipientConfig"]
+ elif repl_members == repl_config["members"]:
+ # The recipientConfig and recipient nodes have already been cleaned, no need to
+ # reconfig.
+ return
+
+ # Re-index all members from 0
+ for idx, member in enumerate(repl_members):
+ member["_id"] = idx
+
+ # Prepare the new config
+ repl_config["version"] = repl_config["version"] + 1
+ repl_config["members"] = repl_members
+
+ self.logger.info(
+ f"Reconfiguring donor '{donor_rs_name}' to remove recipient nodes: {repl_config}")
+ donor_client.admin.command({
+ "replSetReconfig": repl_config,
+ "maxTimeMS": self.AWAIT_READY_TIMEOUT_SECS * 60 * 1000
+ })
+
+ with_naive_retry(func=lambda: reconfig_rs(donor_client))
self.logger.info("Tearing down recipient nodes and removing data directories.")
for recipient_node in reversed(recipient_nodes):
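Both reconfig helpers above now rebuild the replica set config inside the function handed to with_naive_retry, so every retry re-reads the current config instead of re-sending a stale one. A minimal sketch of that retry-closure pattern with a stand-in retry helper and in-memory state; the names are illustrative, not the resmoke helpers:

def with_naive_retry(func, attempts=3):
    """Stand-in for the resmoke retry helper: call func again if it raises."""
    last_err = None
    for _ in range(attempts):
        try:
            return func()
        except RuntimeError as err:  # the real helper retries only retryable errors
            last_err = err
    raise last_err


state = {"version": 1, "members": ["a", "b"]}
calls = {"count": 0}


def read_modify_write():
    config = dict(state)  # re-read the current state on every attempt
    config["version"] += 1
    calls["count"] += 1
    if calls["count"] == 1:  # simulate a transient 'InterruptedDueToReplStateChange'
        raise RuntimeError("InterruptedDueToReplStateChange")
    state.update(config)


with_naive_retry(lambda: read_modify_write())
assert state["version"] == 2  # the retry applied exactly one increment
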
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index ba2e27099e10c..a561c44f471ba 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -24,7 +24,7 @@ def __init__(self, logger, job_num, fixturelib, mongos_executable=None, mongos_o
preserve_dbpath=False, num_shards=1, num_rs_nodes_per_shard=1, num_mongos=1,
enable_sharding=None, enable_balancer=True, auth_options=None,
configsvr_options=None, shard_options=None, cluster_logging_prefix=None,
- catalog_shard=None):
+ config_shard=None):
"""Initialize ShardedClusterFixture with different options for the cluster processes."""
interface.Fixture.__init__(self, logger, job_num, fixturelib, dbpath_prefix=dbpath_prefix)
@@ -42,7 +42,7 @@ def __init__(self, logger, job_num, fixturelib, mongos_executable=None, mongos_o
mongod_options.get("set_parameters", {})).copy()
self.mongod_options["set_parameters"]["migrationLockAcquisitionMaxWaitMS"] = \
self.mongod_options["set_parameters"].get("migrationLockAcquisitionMaxWaitMS", 30000)
- self.catalog_shard = catalog_shard
+ self.config_shard = config_shard
self.preserve_dbpath = preserve_dbpath
self.num_shards = num_shards
self.num_rs_nodes_per_shard = num_rs_nodes_per_shard
@@ -91,7 +91,7 @@ def pids(self):
def setup(self):
"""Set up the sharded cluster."""
- if self.catalog_shard is None:
+ if self.config_shard is None:
self.configsvr.setup()
# Start up each of the shards
@@ -139,11 +139,11 @@ def await_ready(self):
# Turn off the balancer if it is not meant to be enabled.
if not self.enable_balancer:
- self.stop_balancer()
+ self.stop_balancer(join_migrations=False)
# Inform mongos about each of the shards
for idx, shard in enumerate(self.shards):
- self._add_shard(client, shard, self.catalog_shard == idx)
+ self._add_shard(client, shard, self.config_shard == idx)
# Ensure that all CSRS nodes are up to date. This is strictly needed for tests that use
# multiple mongoses. In those cases, the first mongos initializes the contents of the config
@@ -192,13 +192,18 @@ def _await_mongod_sharding_initialization(self):
.format(port, interface.Fixture.AWAIT_READY_TIMEOUT_SECS))
time.sleep(0.1)
- def stop_balancer(self, timeout_ms=60000):
+ # TODO SERVER-76343 remove the join_migrations parameter and the if clause depending on it.
+ def stop_balancer(self, timeout_ms=300000, join_migrations=True):
"""Stop the balancer."""
client = interface.build_client(self, self.auth_options)
client.admin.command({"balancerStop": 1}, maxTimeMS=timeout_ms)
+ if join_migrations:
+ for shard in self.shards:
+ shard_client = interface.build_client(shard.get_primary(), self.auth_options)
+ shard_client.admin.command({"_shardsvrJoinMigrations": 1})
self.logger.info("Stopped the balancer")
- def start_balancer(self, timeout_ms=60000):
+ def start_balancer(self, timeout_ms=300000):
"""Start the balancer."""
client = interface.build_client(self, self.auth_options)
client.admin.command({"balancerStart": 1}, maxTimeMS=timeout_ms)
@@ -329,8 +334,8 @@ def get_rs_shard_kwargs(self, index):
auth_options = shard_options.pop("auth_options", self.auth_options)
preserve_dbpath = shard_options.pop("preserve_dbpath", self.preserve_dbpath)
- replset_config_options = self.fixturelib.make_historic(
- shard_options.pop("replset_config_options", {}))
+ replset_config_options = shard_options.pop("replset_config_options", {})
+ replset_config_options = replset_config_options.copy()
replset_config_options["configsvr"] = False
mongod_options = self.mongod_options.copy()
@@ -340,16 +345,24 @@ def get_rs_shard_kwargs(self, index):
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard{}".format(index))
mongod_options["replSet"] = self._SHARD_REPLSET_NAME_PREFIX + str(index)
- if self.catalog_shard == index:
+ if self.config_shard == index:
del mongod_options["shardsvr"]
mongod_options["configsvr"] = ""
replset_config_options["configsvr"] = True
- mongod_options["set_parameters"]["featureFlagCatalogShard"] = "true"
mongod_options["set_parameters"]["featureFlagTransitionToCatalogShard"] = "true"
+ mongod_options["storageEngine"] = "wiredTiger"
configsvr_options = self.configsvr_options.copy()
+
+ if "mongod_options" in configsvr_options:
+ mongod_options = self.fixturelib.merge_mongo_option_dicts(
+ mongod_options, configsvr_options["mongod_options"])
+ if "replset_config_options" in configsvr_options:
+ replset_config_options = self.fixturelib.merge_mongo_option_dicts(
+ replset_config_options, configsvr_options["replset_config_options"])
+
for option, value in configsvr_options.items():
- if option == "num_nodes":
+ if option in ("num_nodes", "mongod_options", "replset_config_options"):
continue
if option in shard_options:
if shard_options[option] != value:
@@ -363,8 +376,8 @@ def get_rs_shard_kwargs(self, index):
return {
"mongod_options": mongod_options, "mongod_executable": self.mongod_executable,
"auth_options": auth_options, "preserve_dbpath": preserve_dbpath,
- "replset_config_options": replset_config_options,
- "shard_logging_prefix": shard_logging_prefix, **shard_options
+ "replset_config_options": replset_config_options, "shard_logging_prefix":
+ shard_logging_prefix, "config_shard": self.config_shard, **shard_options
}
def install_rs_shard(self, rs_shard):
@@ -390,7 +403,7 @@ def install_mongos(self, mongos):
"""Install a mongos. Called by a builder."""
self.mongos.append(mongos)
- def _add_shard(self, client, shard, is_catalog_shard):
+ def _add_shard(self, client, shard, is_config_shard):
"""
Add the specified program as a shard by executing the addShard command.
@@ -398,9 +411,9 @@ def _add_shard(self, client, shard, is_catalog_shard):
"""
connection_string = shard.get_internal_connection_string()
- if is_catalog_shard:
- self.logger.info("Adding %s as catalog shard...", connection_string)
- client.admin.command({"transitionToCatalogShard": 1})
+ if is_config_shard:
+ self.logger.info("Adding %s as config shard...", connection_string)
+ client.admin.command({"transitionFromDedicatedConfigServer": 1})
else:
self.logger.info("Adding %s as a shard...", connection_string)
client.admin.command({"addShard": connection_string})
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index 9c88a64c45a9e..055f06077c5d1 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -236,6 +236,16 @@ def launch_mongod_program(self, logger, job_num, executable=None, process_kwargs
if self.config.MONGOD_SET_PARAMETERS is not None:
suite_set_parameters.update(yaml.safe_load(self.config.MONGOD_SET_PARAMETERS))
+ # Some storage options exist both as a mongod option (a config file option and its
+ # equivalent "--xyz" command line parameter) and as a "--setParameter". In case of conflict,
+ # for instance when the config fuzzer adds "xyz" as a "--setParameter" while the "--xyz"
+ # option is already defined in the suite's YAML, the "--setParameter" value is preserved and
+ # the "--xyz" option is discarded to avoid an error due to conflicting definitions.
+ mongod_option_and_set_parameter_conflicts = ["syncdelay", "journalCommitInterval"]
+ for key in mongod_option_and_set_parameter_conflicts:
+ if (key in mongod_options and key in suite_set_parameters):
+ del mongod_options[key]
+
# Set default log verbosity levels if none were specified.
if "logComponentVerbosity" not in suite_set_parameters:
suite_set_parameters[
@@ -257,6 +267,15 @@ def launch_mongod_program(self, logger, job_num, executable=None, process_kwargs
and "orphanCleanupDelaySecs" not in suite_set_parameters):
suite_set_parameters["orphanCleanupDelaySecs"] = 1
+ # receiveChunkWaitForRangeDeleterTimeoutMS controls how long an incoming migration will wait
+ # for an intersecting range with data in it to be cleared up before failing. The default is
+ # 10 seconds, but in some slower variants this is not enough time for the range deleter to
+ # finish, so we increase it here to 90 seconds. Setting a value for this parameter in the
+ # .yml file overrides this.
+ if (("shardsvr" in mongod_options or "configsvr" in mongod_options)
+ and "receiveChunkWaitForRangeDeleterTimeoutMS" not in suite_set_parameters):
+ suite_set_parameters["receiveChunkWaitForRangeDeleterTimeoutMS"] = 90000
+
# The LogicalSessionCache does automatic background refreshes in the server. This is
# race-y for tests, since tests trigger their own immediate refreshes instead. Turn off
# background refreshing for tests. Set in the .yml file to override this.
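A concrete sketch of the precedence rule described in the first comment block above: when the same knob appears both as a plain mongod option and as a setParameter, the setParameter value wins and the plain option is dropped. The values here are illustrative:

mongod_options = {"syncdelay": 60, "port": 20000}
suite_set_parameters = {"syncdelay": 15}

# Knobs that exist both as a mongod option and as a setParameter.
conflicting_keys = ["syncdelay", "journalCommitInterval"]
for key in conflicting_keys:
    if key in mongod_options and key in suite_set_parameters:
        del mongod_options[key]  # keep the setParameter value, drop the plain option

assert mongod_options == {"port": 20000}
assert suite_set_parameters == {"syncdelay": 15}
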
diff --git a/buildscripts/resmokelib/testing/hook_test_archival.py b/buildscripts/resmokelib/testing/hook_test_archival.py
index 38909056ce379..23ebfda8de34e 100644
--- a/buildscripts/resmokelib/testing/hook_test_archival.py
+++ b/buildscripts/resmokelib/testing/hook_test_archival.py
@@ -6,6 +6,7 @@
from buildscripts.resmokelib import config
from buildscripts.resmokelib import errors
from buildscripts.resmokelib import utils
+from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED
from buildscripts.resmokelib.utils import globstar
@@ -105,5 +106,9 @@ def _archive_hook_or_test(self, logger, test_name, test, manager):
else:
logger.info("Archive succeeded for %s: %s", test_name, message)
- if not manager.setup_fixture(logger):
+ if HANG_ANALYZER_CALLED.is_set():
+ logger.info("Hang Analyzer has been called. Fixtures will not be restarted.")
+ raise errors.StopExecution(
+ "Hang analyzer has been called. Stopping further execution of tests.")
+ elif not manager.setup_fixture(logger):
raise errors.StopExecution("Error while restarting test fixtures after archiving.")
diff --git a/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py b/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py
index d392c6ae0b8f9..c6b415087359a 100644
--- a/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py
+++ b/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py
@@ -4,70 +4,69 @@
internally sleep for 1 second between runs.
"""
-import os.path
+import pymongo
+import random
-from buildscripts.resmokelib import errors
-from buildscripts.resmokelib.testing.hooks import jsfile
-from buildscripts.resmokelib.testing.hooks.background_job import _BackgroundJob, _ContinuousDynamicJSTestCase
+from buildscripts.resmokelib.testing.hooks.bghook import BGHook
-class AggregateResourceConsumptionMetricsInBackground(jsfile.JSHook):
+class AggregateResourceConsumptionMetricsInBackground(BGHook):
"""A hook to run $operationMetrics stage in the background."""
- IS_BACKGROUND = True
-
def __init__(self, hook_logger, fixture, shell_options=None):
"""Initialize AggregateResourceConsumptionMetricsInBackground."""
- description = "Run background $operationMetrics on all mongods while a test is running"
- js_filename = os.path.join("jstests", "hooks", "run_aggregate_metrics_background.js")
- jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description,
- shell_options=shell_options)
- self._background_job = None
-
- def before_suite(self, test_report):
- """Start the background thread."""
- self._background_job = _BackgroundJob("AggregateResourceConsumptionMetricsInBackground")
- self.logger.info("Starting the background aggregate metrics thread.")
- self._background_job.start()
-
- def after_suite(self, test_report, teardown_flag=None):
- """Signal the background aggregate metrics thread to exit, and wait until it does."""
- if self._background_job is None:
- return
-
- self.logger.info("Stopping the background aggregate metrics thread.")
- self._background_job.stop()
-
- def before_test(self, test, test_report):
- """Instruct the background aggregate metrics thread to run while 'test' is also running."""
- if self._background_job is None:
- return
- hook_test_case = _ContinuousDynamicJSTestCase.create_before_test(
- test.logger, test, self, self._js_filename, self._shell_options)
- hook_test_case.configure(self.fixture)
-
- self.logger.info("Resuming the background aggregate metrics thread.")
- self._background_job.resume(hook_test_case, test_report)
-
- def after_test(self, test, test_report): # noqa: D205,D400
- """Instruct the background aggregate metrics thread to stop running now that 'test' has
- finished running.
- """
- if self._background_job is None:
- return
-
- self.logger.info("Pausing the background aggregate metrics thread.")
- self._background_job.pause()
-
- if self._background_job.exc_info is not None:
- if isinstance(self._background_job.exc_info[1], errors.TestFailure):
- # If the mongo shell process running the JavaScript file exited with a non-zero
- # return code, then we raise an errors.ServerFailure exception to cause resmoke.py's
- # test execution to stop.
- raise errors.ServerFailure(self._background_job.exc_info[1].args[0])
- else:
- self.logger.error(
- "Encountered an error inside the background aggregate metrics thread.",
- exc_info=self._background_job.exc_info)
- raise self._background_job.exc_info[1]
+ description = "Run background $operationMetrics on all mongods while a test is running"
+ super().__init__(hook_logger, fixture, description, tests_per_cycle=None,
+ loop_delay_ms=1000)
+
+ def run_action(self):
+ """Collects $operationMetrics on all non-arbiter nodes in the fixture."""
+ for node_info in self.fixture.get_node_info():
+ conn = pymongo.MongoClient(port=node_info.port)
+ # Filter out arbiters.
+ if "arbiterOnly" in conn.admin.command({"isMaster": 1}):
+ self.logger.info(
+ "Skipping background aggregation against test node: %s because it is an " +
+ "arbiter and has no data.", node_info.full_name)
+ return
+
+ # Clear the metrics about 10% of the time.
+ clear_metrics = random.random() < 0.1
+ self.logger.info("Running $operationMetrics with {clearMetrics: %s} on host: %s",
+ clear_metrics, node_info.full_name)
+ with conn.admin.aggregate(
+ [{"$operationMetrics": {"clearMetrics": clear_metrics}}]) as cursor:
+ for doc in cursor:
+ try:
+ self.verify_metrics(doc)
+ except:
+ self.logger.info(
+ "Caught exception while verifying that all expected fields are in the" +
+ " metrics output: %s", doc)
+ raise
+
+ def verify_metrics(self, doc):
+ """Checks whether the output from $operatiomMetrics has the schema we expect."""
+
+ top_level_fields = [
+ "docBytesWritten", "docUnitsWritten", "idxEntryBytesWritten", "idxEntryUnitsWritten",
+ "totalUnitsWritten", "cpuNanos", "db", "primaryMetrics", "secondaryMetrics"
+ ]
+ read_fields = [
+ "docBytesRead", "docUnitsRead", "idxEntryBytesRead", "idxEntryUnitsRead", "keysSorted",
+ "docUnitsReturned"
+ ]
+
+ for key in top_level_fields:
+ assert key in doc, ("The metrics output is missing the property: " + key)
+
+ primary_metrics = doc["primaryMetrics"]
+ for key in read_fields:
+ assert key in primary_metrics, (
+ "The metrics output is missing the property: primaryMetrics." + key)
+
+ secondary_metrics = doc["secondaryMetrics"]
+ for key in read_fields:
+ assert key in secondary_metrics, (
+ "The metrics output is missing the property: secondaryMetrics." + key)
diff --git a/buildscripts/resmokelib/testing/hooks/background_job.py b/buildscripts/resmokelib/testing/hooks/background_job.py
index b01196ba462ac..2adba99b97e76 100644
--- a/buildscripts/resmokelib/testing/hooks/background_job.py
+++ b/buildscripts/resmokelib/testing/hooks/background_job.py
@@ -3,6 +3,7 @@
import sys
import threading
+from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing.hooks import jsfile
diff --git a/buildscripts/resmokelib/testing/hooks/bghook.py b/buildscripts/resmokelib/testing/hooks/bghook.py
index b61f37b4e2d8e..1b3fed3b10427 100644
--- a/buildscripts/resmokelib/testing/hooks/bghook.py
+++ b/buildscripts/resmokelib/testing/hooks/bghook.py
@@ -13,11 +13,13 @@ class BGJob(threading.Thread):
BGJob will call 'run_action' without any delay and expects the 'run_action' function to add some form of delay.
"""
- def __init__(self, hook):
+ def __init__(self, hook, loop_delay_ms=None):
"""Initialize the background job."""
threading.Thread.__init__(self, name=f"BGJob-{hook.__class__.__name__}")
+ self._loop_delay_ms = loop_delay_ms
self.daemon = True
self._hook = hook
+ self._interrupt_event = threading.Event()
self.__is_alive = True
self.err = None
@@ -29,6 +31,14 @@ def run(self):
try:
self._hook.run_action()
+ if self._loop_delay_ms is not None:
+ # The configured loop delay asked us to wait before running the action again. Do
+ # that wait, but wake up early if the job is killed in the meantime.
+ interrupted = self._interrupt_event.wait(self._loop_delay_ms / 1000.0)
+ if interrupted:
+ self._hook.logger.info("interrupted")
+ break
except Exception as err: # pylint: disable=broad-except
self._hook.logger.error("Background thread caught exception: %s.", err)
self.err = err
@@ -37,6 +47,7 @@ def run(self):
def kill(self):
"""Kill the background job."""
self.__is_alive = False
+ self._interrupt_event.set()
class BGHook(interface.Hook):
@@ -46,8 +57,13 @@ class BGHook(interface.Hook):
# By default, we continuously run the background hook for the duration of the suite.
DEFAULT_TESTS_PER_CYCLE = math.inf
- def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None):
- """Initialize the background hook."""
+ def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None, loop_delay_ms=None):
+ """
+ Initialize the background hook.
+
+ 'tests_per_cycle' or 'loop_delay_ms' can be used to configure how often the background job
+ is restarted, and how often run_action() is called, respectively.
+ """
interface.Hook.__init__(self, hook_logger, fixture, desc)
self.logger = hook_logger
@@ -57,15 +73,21 @@ def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None):
self._test_num = 0
# The number of tests we execute before restarting the background hook.
self._tests_per_cycle = self.DEFAULT_TESTS_PER_CYCLE if tests_per_cycle is None else tests_per_cycle
+ self._loop_delay_ms = loop_delay_ms
def run_action(self):
- """Perform an action. This function will be called continuously in the BgJob."""
+ """
+ Perform an action. This function will be called continuously in the BgJob.
+
+ If a loop_delay_ms was given, that many milliseconds of sleep will happen between each
+ invocation.
+ """
raise NotImplementedError
def before_suite(self, test_report):
"""Start the background thread."""
self.logger.info("Starting the background thread.")
- self._background_job = BGJob(self)
+ self._background_job = BGJob(self, self._loop_delay_ms)
self._background_job.start()
def after_suite(self, test_report, teardown_flag=None):
@@ -86,7 +108,7 @@ def before_test(self, test, test_report):
return
self.logger.info("Restarting the background thread.")
- self._background_job = BGJob(self)
+ self._background_job = BGJob(self, self._loop_delay_ms)
self._background_job.start()
def after_test(self, test, test_report):
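The loop_delay_ms support above relies on threading.Event.wait, which returns True if the event was set and False once the timeout elapses, so the job sleeps between actions yet reacts promptly to kill(). A minimal sketch of that interruptible-sleep loop; the names are illustrative, not the resmoke classes:

import threading

stop_event = threading.Event()
loop_delay_ms = 250


def background_loop():
    while True:
        # ... the hook's run_action() would run here ...
        interrupted = stop_event.wait(loop_delay_ms / 1000.0)
        if interrupted:
            # The event was set, so exit promptly instead of sleeping out the delay.
            break


worker = threading.Thread(target=background_loop, daemon=True)
worker.start()
stop_event.set()  # request a stop
worker.join()
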
diff --git a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
index e391f8e56644e..1c1caaf3cb553 100644
--- a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
+++ b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
@@ -68,11 +68,6 @@ def run_test(self):
exclude_dbs.append(same_db_name)
self.logger.info("Dropping all databases except for %s", exclude_dbs)
- is_sharded_fixture = isinstance(self._hook.fixture, shardedcluster.ShardedClusterFixture)
- # Stop the balancer.
- if is_sharded_fixture and self._hook.fixture.enable_balancer:
- self._hook.fixture.stop_balancer()
-
for db_name in [db for db in db_names if db not in exclude_dbs]:
self.logger.info("Dropping database %s", db_name)
try:
@@ -93,7 +88,3 @@ def run_test(self):
self.logger.exception("Encountered an error while dropping db % collection %s.",
same_db_name, coll)
raise
-
- # Start the balancer.
- if is_sharded_fixture and self._hook.fixture.enable_balancer:
- self._hook.fixture.start_balancer()
diff --git a/buildscripts/resmokelib/testing/hooks/configshard_transition.py b/buildscripts/resmokelib/testing/hooks/configshard_transition.py
new file mode 100644
index 0000000000000..1b66933de28e5
--- /dev/null
+++ b/buildscripts/resmokelib/testing/hooks/configshard_transition.py
@@ -0,0 +1,186 @@
+"""Test hook that periodically transitions the config server in/out of config shard mode."""
+
+import time
+import threading
+import random
+
+from buildscripts.resmokelib import errors
+from buildscripts.resmokelib.testing.hooks import interface
+from buildscripts.resmokelib.testing.hooks import lifecycle as lifecycle_interface
+from buildscripts.resmokelib.testing.fixtures import shardedcluster
+from buildscripts.resmokelib.testing.fixtures import interface as fixture_interface
+
+
+class ContinuousConfigShardTransition(interface.Hook):
+ DESCRIPTION = (
+ "Continuous config shard transition (transitions in/out of config shard mode at regular"
+ " intervals)")
+
+ IS_BACKGROUND = True
+
+ STOPS_FIXTURE = False
+
+ def __init__(self, hook_logger, fixture, auth_options=None):
+ interface.Hook.__init__(self, hook_logger, fixture,
+ ContinuousConfigShardTransition.DESCRIPTION)
+ self._fixture = fixture
+ self._transition_thread = None
+ self._auth_options = auth_options
+
+ def before_suite(self, test_report):
+ """Before suite."""
+ lifecycle = lifecycle_interface.FlagBasedThreadLifecycle()
+
+ if not isinstance(self._fixture, shardedcluster.ShardedClusterFixture):
+ msg = "Can only transition config shard mode for sharded cluster fixtures."
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
+
+ self._transition_thread = _TransitionThread(self.logger, lifecycle, self._fixture,
+ self._auth_options)
+ self.logger.info("Starting the transition thread.")
+ self._transition_thread.start()
+
+ def after_suite(self, test_report, teardown_flag=None):
+ """After suite."""
+ self.logger.info("Stopping the transition thread.")
+ self._transition_thread.stop()
+ self.logger.info("Transition thread stopped.")
+
+ def before_test(self, test, test_report):
+ """Before test."""
+ self.logger.info("Resuming the transition thread.")
+ self._transition_thread.pause()
+ self._transition_thread.resume()
+
+ def after_test(self, test, test_report):
+ """After test."""
+ self.logger.info("Pausing the transition thread.")
+ self._transition_thread.pause()
+ self.logger.info("Paused the transition thread.")
+
+
+class _TransitionThread(threading.Thread):
+ CONFIG_SHARD = "config shard mode"
+ DEDICATED = "dedicated config server mode"
+
+ def __init__(self, logger, stepdown_lifecycle, fixture, auth_options):
+ threading.Thread.__init__(self, name="TransitionThread")
+ self.logger = logger
+ self.__lifecycle = stepdown_lifecycle
+ self._fixture = fixture
+ self._auth_options = auth_options
+ self._client = fixture_interface.build_client(self._fixture, self._auth_options)
+ self._current_mode = self._current_fixture_mode()
+
+ self._last_exec = time.time()
+ # Event set when the thread has been stopped using the 'stop()' method.
+ self._is_stopped_evt = threading.Event()
+ # Event set when the thread is not performing stepdowns.
+ self._is_idle_evt = threading.Event()
+ self._is_idle_evt.set()
+
+ def _current_fixture_mode(self):
+ res = self._client.admin.command({"listShards": 1})
+
+ for shard_info in res["shards"]:
+ if shard_info["_id"] == "config":
+ return self.CONFIG_SHARD
+
+ return self.DEDICATED
+
+ def run(self):
+ try:
+ while True:
+ self._is_idle_evt.set()
+
+ permitted = self.__lifecycle.wait_for_action_permitted()
+ if not permitted:
+ break
+
+ self._is_idle_evt.clear()
+ secs = float(10)
+ now = time.time()
+ if now - self._last_exec > secs:
+ self.logger.info("Starting transition from " + self._current_mode)
+ if self._current_mode is self.CONFIG_SHARD:
+ self._transition_to_dedicated()
+ self._current_mode = self.DEDICATED
+ else:
+ self._transition_to_config_shard()
+ self._current_mode = self.CONFIG_SHARD
+ self._last_exec = time.time()
+ self.logger.info("Completed transition to %s in %0d ms", self._current_mode,
+ (self._last_exec - now) * 1000)
+ except Exception: # pylint: disable=W0703
+ # Proactively log the exception when it happens so it will be
+ # flushed immediately.
+ self.logger.exception("Transition Thread threw exception")
+ # The event should be signaled whenever the thread is not performing transitions.
+ self._is_idle_evt.set()
+
+ def stop(self):
+ """Stop the thread."""
+ self.__lifecycle.stop()
+ self._is_stopped_evt.set()
+ # Unpause to allow the thread to finish.
+ self.resume()
+ self.join()
+
+ def pause(self):
+ """Pause the thread."""
+ self.__lifecycle.mark_test_finished()
+
+ # Wait until we are no longer executing transitions.
+ self._is_idle_evt.wait()
+ # Check if the thread is alive in case it has thrown an exception while running.
+ self._check_thread()
+
+ def resume(self):
+ """Resume the thread."""
+ self.__lifecycle.mark_test_started()
+
+ def _check_thread(self):
+ if not self.is_alive():
+ msg = "The transition thread is not running."
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
+
+ def _transition_to_dedicated(self):
+ res = None
+ start_time = time.time()
+ while True:
+ res = self._client.admin.command({"transitionToDedicatedConfigServer": 1})
+
+ if res["state"] == "completed":
+ break
+ elif res["state"] == "ongoing" and res["dbsToMove"]:
+ non_config_shard_id = self._get_non_config_shard_id()
+ for db in res["dbsToMove"]:
+ msg = "running movePrimary for: " + str(db)
+ self.logger.info(msg)
+ self._client.admin.command({"movePrimary": db, "to": non_config_shard_id})
+
+ time.sleep(1)
+
+ if time.time() - start_time > float(300):
+ msg = "Could not transition to dedicated config server. with last response: " + str(
+ res)
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
+
+ def _transition_to_config_shard(self):
+ self._client.admin.command({"transitionFromDedicatedConfigServer": 1})
+
+ def _get_non_config_shard_id(self):
+ res = self._client.admin.command({"listShards": 1})
+
+ if len(res["shards"]) < 2:
+ msg = "Did not find a non-config shard"
+ self.logger.error(msg)
+ raise errors.ServerFailure(msg)
+
+ possible_choices = [
+ shard_info["_id"] for shard_info in res["shards"] if shard_info["_id"] != "config"
+ ]
+ return random.choice(possible_choices)
diff --git a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
index b2328e1fe0628..a8b679904c2fd 100644
--- a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
+++ b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
@@ -106,7 +106,8 @@ def _add_fixture(self, fixture):
for shard_fixture in fixture.shards:
self._add_fixture(shard_fixture)
- self._add_fixture(fixture.configsvr)
+ if fixture.config_shard is None:
+ self._add_fixture(fixture.configsvr)
for mongos_fixture in fixture.mongos:
self._mongos_fixtures.append(mongos_fixture)
diff --git a/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py b/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py
new file mode 100644
index 0000000000000..d8d7c21cad603
--- /dev/null
+++ b/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py
@@ -0,0 +1,75 @@
+"""Test hook for running background FCV upgrade and downgrade commands.
+
+This hook runs continuously in a background thread while the test is running.
+
+This can be run against a replica set or sharded cluster.
+"""
+
+import os.path
+
+from buildscripts.resmokelib import errors
+from buildscripts.resmokelib.testing.hooks import jsfile
+from buildscripts.resmokelib.testing.hooks.background_job import _BackgroundJob, _ContinuousDynamicJSTestCase
+
+
+class FCVUpgradeDowngradeInBackground(jsfile.JSHook):
+ """A hook to run background FCV upgrade and downgrade against test servers while a test is running."""
+
+ IS_BACKGROUND = True
+
+ def __init__(self, hook_logger, fixture, shell_options=None):
+ """Initialize FCVUpgradeDowngradeInBackground."""
+ description = "Run background FCV upgrade/downgrade while a test is running"
+ js_filename = os.path.join("jstests", "hooks", "run_fcv_upgrade_downgrade_background.js")
+ jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description,
+ shell_options=shell_options)
+
+ self._background_job = None
+
+ def before_suite(self, test_report):
+ """Start the background thread."""
+ self._background_job = _BackgroundJob("FCVUpgradeDowngradeInBackground")
+ self.logger.info("Starting the background FCV upgrade/downgrade thread.")
+ self._background_job.start()
+
+ def after_suite(self, test_report, teardown_flag=None):
+ """Signal the background FCV upgrade/downgrade thread to exit, and wait until it does."""
+ if self._background_job is None:
+ return
+
+ self.logger.info("Stopping the background FCV upgrade/downgrade thread.")
+ self._background_job.stop()
+
+ def before_test(self, test, test_report):
+ """Instruct the background FCV upgrade/downgrade thread to run while 'test' is also running."""
+ if self._background_job is None:
+ return
+
+ hook_test_case = _ContinuousDynamicJSTestCase.create_before_test(
+ test.logger, test, self, self._js_filename, self._shell_options)
+ hook_test_case.configure(self.fixture)
+
+ self.logger.info("Resuming the background FCV upgrade/downgrade thread.")
+ self._background_job.resume(hook_test_case, test_report)
+
+ def after_test(self, test, test_report): # noqa: D205,D400
+ """Instruct the background FCV upgrade/downgrade thread to stop running now that 'test' has
+ finished running.
+ """
+ if self._background_job is None:
+ return
+
+ self.logger.info("Pausing the background FCV upgrade/downgrade thread.")
+ self._background_job.pause()
+
+ if self._background_job.exc_info is not None:
+ if isinstance(self._background_job.exc_info[1], errors.TestFailure):
+ # If the mongo shell process running the JavaScript file exited with a non-zero
+ # return code, then we raise an errors.ServerFailure exception to cause resmoke.py's
+ # test execution to stop.
+ raise errors.ServerFailure(self._background_job.exc_info[1].args[0])
+ else:
+ self.logger.error(
+ "Encountered an error inside the background FCV upgrade/downgrade thread.",
+ exc_info=self._background_job.exc_info)
+ raise self._background_job.exc_info[1]
diff --git a/buildscripts/resmokelib/testing/hooks/orphans.py b/buildscripts/resmokelib/testing/hooks/orphans.py
index 38d62f51010c7..2d2d3947aecc5 100644
--- a/buildscripts/resmokelib/testing/hooks/orphans.py
+++ b/buildscripts/resmokelib/testing/hooks/orphans.py
@@ -22,16 +22,3 @@ def __init__(self, hook_logger, fixture, shell_options=None):
js_filename = os.path.join("jstests", "hooks", "run_check_orphans_are_deleted.js")
super().__init__(hook_logger, fixture, js_filename, description,
shell_options=shell_options)
-
- def after_test(self, test, test_report):
- """Run the run_check_orphans_are_deleted.js hook."""
-
- # We temporarily disable the balancer so more work isn't generated for the range deleter
- # while the hook is running.
- if self.fixture.enable_balancer:
- self.fixture.stop_balancer()
-
- super().after_test(test, test_report)
-
- if self.fixture.enable_balancer:
- self.fixture.start_balancer()
diff --git a/buildscripts/resmokelib/testing/hooks/run_query_stats.py b/buildscripts/resmokelib/testing/hooks/run_query_stats.py
new file mode 100644
index 0000000000000..06cb182e422fc
--- /dev/null
+++ b/buildscripts/resmokelib/testing/hooks/run_query_stats.py
@@ -0,0 +1,37 @@
+"""
+Test hook for verifying $queryStats collects expected metrics and can redact query shapes.
+
+This runs in the background as other tests are ongoing.
+"""
+
+from buildscripts.resmokelib.testing.hooks.interface import Hook
+from bson import binary
+
+
+class RunQueryStats(Hook):
+ """Runs $queryStats after every test, and clears the query stats store before every test."""
+
+ IS_BACKGROUND = False
+
+ def __init__(self, hook_logger, fixture):
+ description = "Read query stats data after each test."
+ super().__init__(hook_logger, fixture, description)
+ self.client = self.fixture.mongo_client()
+ self.hmac_key = binary.Binary(("0" * 32).encode('utf-8'))
+
+ def verify_query_stats(self, querystats_spec):
+ """Verify a $queryStats call has all the right properties."""
+ with self.client.admin.aggregate([{"$queryStats": querystats_spec}]) as cursor:
+ for operation in cursor:
+ assert "key" in operation
+ assert "metrics" in operation
+ assert "asOf" in operation
+
+ def after_test(self, test, test_report):
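+        """Run $queryStats with and without HMAC redaction once the test has finished."""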
+ self.verify_query_stats({})
+ self.verify_query_stats(
+ {"transformIdentifiers": {"algorithm": "hmac-sha-256", "hmacKey": self.hmac_key}})
+
+ def before_test(self, test, test_report):
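+        """Clear the query stats store by shrinking its cache to 0% and then restoring it to 1%."""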
+ self.client.admin.command("setParameter", 1, internalQueryStatsCacheSize="0%")
+ self.client.admin.command("setParameter", 1, internalQueryStatsCacheSize="1%")
diff --git a/buildscripts/resmokelib/testing/hooks/stepdown.py b/buildscripts/resmokelib/testing/hooks/stepdown.py
index 1d274c5e1fece..ac73bb01bbf15 100644
--- a/buildscripts/resmokelib/testing/hooks/stepdown.py
+++ b/buildscripts/resmokelib/testing/hooks/stepdown.py
@@ -55,6 +55,10 @@ def __init__(self, hook_logger, fixture, config_stepdown=True, shard_stepdown=Tr
interface.Hook.__init__(self, hook_logger, fixture, ContinuousStepdown.DESCRIPTION)
self._fixture = fixture
+ if hasattr(fixture, "config_shard") and fixture.config_shard is not None and shard_stepdown:
+ # If the config server is a shard, shard_stepdown implies config_stepdown.
+ config_stepdown = shard_stepdown
+
self._config_stepdown = config_stepdown
self._shard_stepdown = shard_stepdown
self._stepdown_interval_secs = float(stepdown_interval_ms) / 1000
@@ -128,8 +132,9 @@ def _add_fixture(self, fixture):
elif isinstance(fixture, shardedcluster.ShardedClusterFixture):
if self._shard_stepdown:
for shard_fixture in fixture.shards:
- self._add_fixture(shard_fixture)
- if self._config_stepdown:
+ if shard_fixture.config_shard is None or self._config_stepdown:
+ self._add_fixture(shard_fixture)
+ if self._config_stepdown and fixture.config_shard is None:
self._add_fixture(fixture.configsvr)
for mongos_fixture in fixture.mongos:
self._mongos_fixtures.append(mongos_fixture)
diff --git a/buildscripts/resmokelib/testing/hooks/tenant_migration.py b/buildscripts/resmokelib/testing/hooks/tenant_migration.py
index 6b805fa137ee1..a629d1206dca4 100644
--- a/buildscripts/resmokelib/testing/hooks/tenant_migration.py
+++ b/buildscripts/resmokelib/testing/hooks/tenant_migration.py
@@ -9,6 +9,7 @@
from bson.binary import Binary, UUID_SUBTYPE
from pymongo.errors import OperationFailure, PyMongoError
+from functools import partial
from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing.fixtures import tenant_migration
@@ -234,6 +235,7 @@ class _TenantMigrationThread(threading.Thread):
WAIT_SECS_RANGES = [[0.05, 0.1], [0.1, 0.5], [1, 5], [5, 15]]
POLL_INTERVAL_SECS = 0.1
+ MIGRATION_ABORTED_ERR_CODE = 325
NO_SUCH_MIGRATION_ERR_CODE = 327
INTERNAL_ERR_CODE = 1
INVALID_SYNC_SOURCE_ERR_CODE = 119
@@ -364,6 +366,10 @@ def _is_fail_point_abort_reason(self, abort_reason):
return abort_reason["code"] == self.INTERNAL_ERR_CODE and abort_reason[
"errmsg"] == "simulate a tenant migration error"
+ def _is_recipient_failover_abort_reason(self, abort_reason):
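+        # MigrationAborted (error code 325) with this message indicates the recipient failed over
+        # mid-migration; _run_migration logs such aborts and tolerates them rather than raising.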
+        return abort_reason["code"] == self.MIGRATION_ABORTED_ERR_CODE and abort_reason[
+            "errmsg"].find("Recipient failover happened during migration") >= 0
+
def _create_migration_opts(self, donor_rs_index, recipient_rs_index):
donor_rs = self._tenant_migration_fixture.get_replset(donor_rs_index)
recipient_rs = self._tenant_migration_fixture.get_replset(recipient_rs_index)
@@ -432,7 +438,11 @@ def _run_migration(self, migration_opts): # noqa: D205,D400
return True
abort_reason = res["abortReason"]
- if self._is_fail_point_abort_reason(abort_reason):
+ if self._is_recipient_failover_abort_reason(abort_reason):
+ self.logger.info("Tenant migration '%s' aborted due to recipient failover: %s",
+ migration_opts.migration_id, str(res))
+ return False
+ elif self._is_fail_point_abort_reason(abort_reason):
self.logger.info(
"Tenant migration '%s' with donor replica set '%s' aborted due to failpoint: " +
"%s.", migration_opts.migration_id, migration_opts.get_donor_name(), str(res))
@@ -590,7 +600,7 @@ def _drop_tenant_databases_on_recipient(self, migration_opts):
for database in res["databases"]:
db_name = database["name"]
if db_name.startswith(self._tenant_id + "_"):
- recipient_client.drop_database(db_name)
+ with_naive_retry(partial(recipient_client.drop_database, db_name))
except PyMongoError as err:
self.logger.exception(
f"Error dropping databases for tenant '{self._tenant_id}' on replica set '{migration_opts.get_recipient_name()}': '{str(err)}'."
diff --git a/buildscripts/resmokelib/testing/hooks/wait_for_replication.py b/buildscripts/resmokelib/testing/hooks/wait_for_replication.py
index 7e3129b746cfc..6b587f6e1f347 100644
--- a/buildscripts/resmokelib/testing/hooks/wait_for_replication.py
+++ b/buildscripts/resmokelib/testing/hooks/wait_for_replication.py
@@ -25,9 +25,9 @@ def after_test(self, test, test_report):
start_time = time.time()
client_conn = self.fixture.get_driver_connection_url()
js_cmds = """
- conn = '{}';
+ const conn = '{}';
try {{
- rst = new ReplSetTest(conn);
+ const rst = new ReplSetTest(conn);
rst.awaitReplication();
}} catch (e) {{
jsTestLog("WaitForReplication got error: " + tojson(e));
diff --git a/buildscripts/resmokelib/testing/job.py b/buildscripts/resmokelib/testing/job.py
index db29bb06a870c..3379dd51777fa 100644
--- a/buildscripts/resmokelib/testing/job.py
+++ b/buildscripts/resmokelib/testing/job.py
@@ -7,6 +7,7 @@
from buildscripts.resmokelib import config
from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing import testcases
+from buildscripts.resmokelib.testing.fixtures import shardedcluster
from buildscripts.resmokelib.testing.fixtures.interface import create_fixture_table
from buildscripts.resmokelib.testing.testcases import fixture as _fixture
from buildscripts.resmokelib.utils import queue as _queue
@@ -282,7 +283,13 @@ def _run_hooks_after_tests(self, test, hook_failure_flag, background=False):
@param test: the test after which we run the hooks.
@param background: whether to run background hooks.
"""
+ suite_with_balancer = isinstance(
+ self.fixture, shardedcluster.ShardedClusterFixture) and self.fixture.enable_balancer
+
try:
+ if not background and suite_with_balancer:
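+                # Pausing the balancer keeps chunk migrations from generating new work (e.g. for
+                # the range deleter) while end-of-test hooks such as the orphan check are running.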
+ self.logger.info("Stopping the balancer before running end-test hooks")
+ self.fixture.stop_balancer()
for hook in self.hooks:
if hook.IS_BACKGROUND == background:
self._run_hook(hook, hook.after_test, test, hook_failure_flag)
@@ -307,6 +314,10 @@ def _run_hooks_after_tests(self, test, hook_failure_flag, background=False):
self.report.setError(test, sys.exc_info())
raise
+ if not background and suite_with_balancer:
+ self.logger.info("Resuming the balancer after running end-test hooks")
+ self.fixture.start_balancer()
+
def _fail_test(self, test, exc_info, return_code=1):
"""Provide helper to record a test as a failure with the provided return code.
diff --git a/buildscripts/resmokelib/testing/report.py b/buildscripts/resmokelib/testing/report.py
index cc98664fabeb2..5dd2cbe870b4e 100644
--- a/buildscripts/resmokelib/testing/report.py
+++ b/buildscripts/resmokelib/testing/report.py
@@ -146,13 +146,9 @@ def stopTest(self, test):
try:
# check if there are stacktrace files, if so, invoke the symbolizer here.
- # If there are no stacktrace files for this job, we do not need to invoke the symbolizer at all.
- # Take a lock to download the debug symbols if it hasn't already been downloaded.
# log symbolized output to test.logger.info()
-
symbolizer = ResmokeSymbolizer()
symbolizer.symbolize_test_logs(test)
- # symbolization completed
unittest.TestResult.stopTest(self, test)
@@ -404,7 +400,7 @@ def __init__(self, test_id, test_file, dynamic):
self.evergreen_status = None
self.return_code = None
self.url_endpoint = None
- self.exception_extractors = None
+ self.exception_extractors = []
self.error = None
diff --git a/buildscripts/resmokelib/testing/retry.py b/buildscripts/resmokelib/testing/retry.py
index 3b5287c4b6155..9d722caf9154b 100644
--- a/buildscripts/resmokelib/testing/retry.py
+++ b/buildscripts/resmokelib/testing/retry.py
@@ -22,17 +22,17 @@
]
-def is_retryable_error(exc):
+def is_retryable_error(exc, retryable_error_codes):
if isinstance(exc, ConnectionFailure):
return True
if exc.has_error_label("RetryableWriteError"):
return True
- if isinstance(exc, OperationFailure) and exc.code in retryable_codes:
+ if isinstance(exc, OperationFailure) and exc.code in retryable_error_codes:
return True
return False
-def with_naive_retry(func, timeout=100):
+def with_naive_retry(func, timeout=100, extra_retryable_error_codes=None):
"""
Retry execution of a provided function naively for up to `timeout` seconds.
@@ -41,8 +41,13 @@ def with_naive_retry(func, timeout=100):
:param func: The function to execute
:param timeout: The maximum amount of time to retry execution
+ :param extra_retryable_error_codes: List of additional error codes that should be considered retryable
"""
+ retryable_error_codes = set(retryable_codes)
+ if extra_retryable_error_codes:
+ retryable_error_codes.update(extra_retryable_error_codes)
+
last_exc = None
start = time.monotonic()
while time.monotonic() - start < timeout:
@@ -50,7 +55,7 @@ def with_naive_retry(func, timeout=100):
return func()
except PyMongoError as exc:
last_exc = exc
- if not is_retryable_error(exc):
+ if not is_retryable_error(exc, retryable_error_codes):
raise
time.sleep(0.1)
diff --git a/buildscripts/resmokelib/testing/symbolizer_service.py b/buildscripts/resmokelib/testing/symbolizer_service.py
index 767e8cb50923a..b4468ef317598 100644
--- a/buildscripts/resmokelib/testing/symbolizer_service.py
+++ b/buildscripts/resmokelib/testing/symbolizer_service.py
@@ -11,6 +11,7 @@
from typing import List, Optional, NamedTuple, Set
from buildscripts.resmokelib import config as _config
+from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED
from buildscripts.resmokelib.testing.testcases.interface import TestCase
# This lock prevents different resmoke jobs from symbolizing stacktraces concurrently,
@@ -57,6 +58,15 @@ def is_windows() -> bool:
"""
return sys.platform == "win32" or sys.platform == "cygwin"
+ @staticmethod
+ def is_macos() -> bool:
+ """
+        Whether we are on macOS.
+
+        :return: True if on macOS.
+ """
+ return sys.platform == "darwin"
+
class ResmokeSymbolizer:
"""Symbolize stacktraces inside test logs."""
@@ -135,6 +145,16 @@ def should_symbolize(self, test: TestCase) -> bool:
test.logger.info("Running on Windows, skipping symbolization")
return False
+ if self.config.is_macos():
+            test.logger.info("Running on macOS, skipping symbolization")
+ return False
+
+ if HANG_ANALYZER_CALLED.is_set():
+ test.logger.info(
+ "Hang analyzer has been called, skipping symbolization to meet timeout constraints."
+ )
+ return False
+
return True
def get_stacktrace_dir(self, test: TestCase) -> Optional[str]:
@@ -305,8 +325,8 @@ def run_symbolizer_script(full_file_path: str, retry_timeout_secs: int) -> str:
"""
symbolizer_args = [
- "python",
- "buildscripts/mongosymb.py",
+ "db-contrib-tool",
+ "symbolize",
"--client-secret",
_config.SYMBOLIZER_CLIENT_SECRET,
"--client-id",
diff --git a/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py b/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py
new file mode 100644
index 0000000000000..15cb9cff96d36
--- /dev/null
+++ b/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py
@@ -0,0 +1,25 @@
+"""The unittest.TestCase for pretty printer tests."""
+import os
+
+from buildscripts.resmokelib import config
+from buildscripts.resmokelib import core
+from buildscripts.resmokelib import utils
+from buildscripts.resmokelib.testing.testcases import interface
+
+
+class PrettyPrinterTestCase(interface.ProcessTestCase):
+ """A pretty printer test to execute."""
+
+ REGISTERED_NAME = "pretty_printer_test"
+
+ def __init__(self, logger, program_executable, program_options=None):
+ """Initialize the PrettyPrinterTestCase with the executable to run."""
+
+ interface.ProcessTestCase.__init__(self, logger, "pretty printer test", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def _make_process(self):
+ return core.programs.make_process(self.logger, [self.program_executable],
+ **self.program_options)
diff --git a/buildscripts/resmokelib/utils/__init__.py b/buildscripts/resmokelib/utils/__init__.py
index c1054af800c02..2f5475ddb9426 100644
--- a/buildscripts/resmokelib/utils/__init__.py
+++ b/buildscripts/resmokelib/utils/__init__.py
@@ -92,19 +92,18 @@ def get_task_name_without_suffix(task_name, variant_name):
return re.sub(fr"(_[0-9]+)?(_{variant_name})?$", "", task_name)
-def pick_catalog_shard_node(catalog_shard, num_shards):
- """Get catalog_shard node index or None if no catalog_shard."""
- if catalog_shard is None:
+def pick_catalog_shard_node(config_shard, num_shards):
+ """Get config_shard node index or None if no config_shard."""
+ if config_shard is None:
return None
- if num_shards is None or int(num_shards) <= 0:
- raise ValueError("Num shards > 0 for catalog shard to exist")
+ if config_shard == "any":
+ if num_shards is None or num_shards == 0:
+ return 0
+ return random.randint(0, num_shards - 1)
- if catalog_shard == "any":
- return random.randrange(0, num_shards)
+ config_shard_index = int(config_shard)
+ if config_shard_index < 0 or config_shard_index >= num_shards:
+ raise ValueError("Config shard value must be in range 0..num_shards-1 or \"any\"")
- catalog_shard_index = int(catalog_shard)
- if catalog_shard_index < 0 or catalog_shard_index >= num_shards:
- raise ValueError("Catalog shard value must be in range 0..num_shards-1 or \"any\"")
-
- return catalog_shard_index
+ return config_shard_index
diff --git a/buildscripts/tests/resmoke_end2end/test_resmoke.py b/buildscripts/tests/resmoke_end2end/test_resmoke.py
index 787a4bb51946b..74b83410dece7 100644
--- a/buildscripts/tests/resmoke_end2end/test_resmoke.py
+++ b/buildscripts/tests/resmoke_end2end/test_resmoke.py
@@ -13,7 +13,9 @@
import yaml
-from buildscripts.resmokelib import core
+from buildscripts.ciconfig.evergreen import parse_evergreen_file
+from buildscripts.resmokelib import config, core, suitesconfig
+from buildscripts.resmokelib.utils.dictionary import get_dict_value
# pylint: disable=unsupported-membership-test
@@ -64,8 +66,13 @@ def setUpClass(cls):
cls.archival_file = "test_archival.txt"
def test_archival_on_task_failure(self):
+        # The --originSuite argument tricks the local resmoke invocation into passing
+        # because when we pass --taskId into resmoke it thinks it is being run in Evergreen
+        # and cannot normally find an Evergreen task associated with
+        # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure.yml
resmoke_args = [
"--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure.yml",
+ "--originSuite=resmoke_end2end_tests",
"--taskId=123",
"--internalParam=test_archival",
"--repeatTests=2",
@@ -79,9 +86,14 @@ def test_archival_on_task_failure(self):
self.assert_dir_file_count(self.test_dir, self.archival_file, archival_dirs_to_expect)
def test_archival_on_task_failure_no_passthrough(self):
+        # The --originSuite argument tricks the local resmoke invocation into passing
+        # because when we pass --taskId into resmoke it thinks it is being run in Evergreen
+        # and cannot normally find an Evergreen task associated with
+        # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure_no_passthrough.yml
resmoke_args = [
"--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure_no_passthrough.yml",
"--taskId=123",
+ "--originSuite=resmoke_end2end_tests",
"--internalParam=test_archival",
"--repeatTests=2",
"--jobs=2",
@@ -146,9 +158,14 @@ def execute_resmoke(self, resmoke_args, sleep_secs=30, **kwargs): # pylint: dis
self.signal_resmoke()
def test_task_timeout(self):
+        # The --originSuite argument tricks the local resmoke invocation into passing
+        # because when we pass --taskId into resmoke it thinks it is being run in Evergreen
+        # and cannot normally find an Evergreen task associated with
+        # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout.yml
resmoke_args = [
"--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout.yml",
"--taskId=123",
+ "--originSuite=resmoke_end2end_tests",
"--internalParam=test_archival",
"--internalParam=test_analysis",
"--repeatTests=2",
@@ -163,9 +180,14 @@ def test_task_timeout(self):
self.assert_dir_file_count(self.test_dir, self.analysis_file, analysis_pids_to_expect)
def test_task_timeout_no_passthrough(self):
+        # The --originSuite argument tricks the local resmoke invocation into passing
+        # because when we pass --taskId into resmoke it thinks it is being run in Evergreen
+        # and cannot normally find an Evergreen task associated with
+        # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout_no_passthrough.yml
resmoke_args = [
"--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout_no_passthrough.yml",
"--taskId=123",
+ "--originSuite=resmoke_end2end_tests",
"--internalParam=test_archival",
"--internalParam=test_analysis",
"--repeatTests=2",
@@ -181,9 +203,14 @@ def test_task_timeout_no_passthrough(self):
# Test scenarios where an resmoke-launched process launches resmoke.
def test_nested_timeout(self):
+        # The --originSuite argument tricks the local resmoke invocation into passing
+        # because when we pass --taskId into resmoke it thinks it is being run in Evergreen
+        # and cannot normally find an Evergreen task associated with
+        # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml
resmoke_args = [
"--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml",
"--taskId=123",
+ "--originSuite=resmoke_end2end_tests",
"--internalParam=test_archival",
"--internalParam=test_analysis",
"jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js",
@@ -541,3 +568,78 @@ def test_random_shell_seed(self):
self.assertTrue(
len(random_seeds) > 1, msg="Resmoke generated the same random seed 10 times in a row.")
+
+
+# Resmoke expects certain parts of the Evergreen config to be structured in a particular way.
+# These tests fail if that structure changes, signaling that resmoke also needs to be updated elsewhere.
+class TestEvergreenYML(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.evg_conf = parse_evergreen_file("etc/evergreen.yml")
+ config.CONFIG_DIR = "buildscripts/resmokeconfig"
+
+ def validate_jstestfuzz_selector(self, suite_name):
+ suite_config = suitesconfig.get_suite(suite_name).get_config()
+ expected_selector = ["jstestfuzz/out/*.js"]
+ self.assertEqual(
+ suite_config["selector"]["roots"], expected_selector,
+ msg=f"The jstestfuzz selector for {suite_name} did not match 'jstestfuzz/out/*.js'")
+
+    # This test asserts that the jstestfuzz tasks upload to the URL we expect.
+    # If the remote URL changes, also change it in the _log_local_resmoke_invocation method
+    # before updating this test to the new URL.
+ def test_jstestfuzz_download_url(self):
+ functions = self.evg_conf.functions
+ run_jstestfuzz = functions["run jstestfuzz"]
+ contains_correct_url = False
+ for item in run_jstestfuzz:
+ if item["command"] != "s3.put":
+ continue
+
+ remote_url = item["params"]["remote_file"]
+ if remote_url == "${project}/${build_variant}/${revision}/jstestfuzz/${task_id}-${execution}.tgz":
+ contains_correct_url = True
+ break
+
+ self.assertTrue(
+ contains_correct_url, msg=
+ "The 'run jstestfuzz' function in evergreen did not contain the remote_url that was expected"
+ )
+
+    # This test asserts that the way implicit multiversion tasks are defined has not changed.
+    # If this fails, you will need to correct the _log_local_resmoke_invocation method before
+    # fixing this test.
+ def test_implicit_multiversion_tasks(self):
+        multiversion_task_names = self.evg_conf.get_task_names_by_tag("multiversion")
+        implicit_multiversion_count = 0
+        for multiversion_task_name in multiversion_task_names:
+ task_config = self.evg_conf.get_task(multiversion_task_name)
+ func = task_config.find_func_command("initialize multiversion tasks")
+ if func is not None:
+ implicit_multiversion_count += 1
+
+ self.assertNotEqual(0, implicit_multiversion_count,
+ msg="Could not find any implicit multiversion tasks in evergreen")
+
+    # This test asserts that the way jstestfuzz tasks are defined has not changed.
+    # It also asserts that the selector for jstestfuzz tasks always points to jstestfuzz/out/*.js.
+    # If this fails, you will need to correct the _log_local_resmoke_invocation method before
+    # fixing this test.
+ def test_jstestfuzz_tasks(self):
+ jstestfuzz_count = 0
+ for task in self.evg_conf.tasks:
+ generate_func = task.find_func_command("generate resmoke tasks")
+ if (generate_func is None
+ or get_dict_value(generate_func, ["vars", "is_jstestfuzz"]) != "true"):
+ continue
+
+ jstestfuzz_count += 1
+
+ multiversion_func = task.find_func_command("initialize multiversion tasks")
+ if multiversion_func is not None:
+ for subtask in multiversion_func["vars"]:
+ self.validate_jstestfuzz_selector(subtask)
+ else:
+ self.validate_jstestfuzz_selector(task.get_suite_name())
+
+ self.assertNotEqual(0, jstestfuzz_count, msg="Could not find any jstestfuzz tasks")
diff --git a/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py b/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py
index d075b60fb4df9..dbd1bf476c700 100644
--- a/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py
+++ b/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py
@@ -1,4 +1,4 @@
-"""Unit tests for resmoke_proxy.py"""
+"""Unit tests for resmoke_proxy.py."""
import unittest
from unittest.mock import MagicMock
diff --git a/buildscripts/tests/resmoke_validation/test_find_suites.py b/buildscripts/tests/resmoke_validation/test_find_suites.py
new file mode 100644
index 0000000000000..6f9bc8bd7fead
--- /dev/null
+++ b/buildscripts/tests/resmoke_validation/test_find_suites.py
@@ -0,0 +1,13 @@
+import subprocess
+import unittest
+import glob
+
+
+class TestFindSuites(unittest.TestCase):
+ def test_find_suites(self):
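+        """Smoke test that the find-suites subcommand runs successfully on a core JS test."""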
+ jstests = glob.glob("jstests/core/*.js")
+ resmoke_process = subprocess.run(
+ ["python3", "buildscripts/resmoke.py", "find-suites", jstests[0]])
+
+ self.assertEqual(0, resmoke_process.returncode,
+ msg="find-suites subcommand did not execute successfully.")
diff --git a/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py b/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py
index df93b2369e998..2484ef3ecadef 100644
--- a/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py
+++ b/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py
@@ -18,11 +18,12 @@ def test_generated_suites(self):
for suite_name in suite_names:
try:
- suite = self.matrix_suite_config.get_config_obj(suite_name)
+ suite = self.matrix_suite_config.get_config_obj_and_verify(suite_name)
self.assertIsNotNone(
suite, msg=
f"{suite_name} was not found. This means either MatrixSuiteConfig.get_named_suites() "
- + "or MatrixSuiteConfig.get_config_obj() are not working as intended.")
+ +
+ "or MatrixSuiteConfig.get_config_obj_and_verify() are not working as intended.")
except Exception as ex:
self.fail(repr(ex))
diff --git a/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py b/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py
index c4b47ff74ab15..6c5ec15bef660 100644
--- a/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py
+++ b/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py
@@ -25,12 +25,12 @@ def verify_suite_generation(self):
InvalidMatrixSuiteError, msg=
f"{tested_suite} suite should have failed because the generated suite does not exist."
):
- self.matrix_suite_config.get_config_obj(tested_suite)
+ self.matrix_suite_config.get_config_obj_and_verify(tested_suite)
self.matrix_suite_config.generate_matrix_suite_file(tested_suite)
try:
- suite = self.matrix_suite_config.get_config_obj(tested_suite)
+ suite = self.matrix_suite_config.get_config_obj_and_verify(tested_suite)
self.assertIsNotNone(suite, msg=f"{suite} was not found.")
except Exception as ex:
self.fail(repr(ex))
@@ -45,13 +45,13 @@ def verify_altered_generated_suite(self):
with self.assertRaises(
InvalidMatrixSuiteError, msg=
f"{tested_suite} suite should have failed because the generated suite was edited."):
- self.matrix_suite_config.get_config_obj(tested_suite)
+ self.matrix_suite_config.get_config_obj_and_verify(tested_suite)
# restore original file back
self.matrix_suite_config.generate_matrix_suite_file(tested_suite)
try:
- suite = self.matrix_suite_config.get_config_obj(tested_suite)
+ suite = self.matrix_suite_config.get_config_obj_and_verify(tested_suite)
self.assertIsNotNone(suite, msg=f"{suite} was not found.")
except Exception as ex:
self.fail(repr(ex))
diff --git a/buildscripts/tests/resmokelib/core/test_redirect.py b/buildscripts/tests/resmokelib/core/test_redirect.py
index b08514811ac93..0f827a96c816e 100644
--- a/buildscripts/tests/resmokelib/core/test_redirect.py
+++ b/buildscripts/tests/resmokelib/core/test_redirect.py
@@ -15,9 +15,10 @@ class TestStdoutRedirect(unittest.TestCase):
is_windows = os.name == "nt"
def test_process_pipes(self):
- """Write a string, one word per line into the beginning of a chain of processes. The input
- will be tee'd into a temporary file and grepped. Verify the contents of the tee'd file and
- the final output of the grep.
+ """Write a string, one word per line into the beginning of a chain of processes.
+
+ The input will be tee'd into a temporary file and grepped. Verify the contents of
+ the tee'd file and the final output of the grep.
"""
if self.is_windows:
diff --git a/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py b/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py
index 7c00760412695..89f37a0e9d2d7 100755
--- a/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py
+++ b/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python3
-"""Unit test for buildscripts/remote_operations.py.
+"""
+Unit test for buildscripts/remote_operations.py.
- Note - Tests require sshd to be enabled on localhost with paswordless login
- and can fail otherwise."""
+Note - Tests require sshd to be enabled on localhost with passwordless login
+and can fail otherwise.
+"""
import os
import shutil
diff --git a/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py b/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py
index 56195f468c778..f601caf2d91de 100644
--- a/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py
+++ b/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py
@@ -25,9 +25,7 @@ def assert_contents(self, expected):
self.assertEqual(actual, expected)
def patch_and_run(self, latest, old, old_bin_version):
- """
- Helper to patch and run the test.
- """
+ """Helper to patch and run the test."""
mock_multiversion_methods = {
'get_backports_required_hash_for_shell_version': MagicMock(),
'get_old_yaml': MagicMock(return_value=old)
diff --git a/buildscripts/tests/resmokelib/test_selector.py b/buildscripts/tests/resmokelib/test_selector.py
index 589a99aa2d25d..e94c6c097bcbf 100644
--- a/buildscripts/tests/resmokelib/test_selector.py
+++ b/buildscripts/tests/resmokelib/test_selector.py
@@ -460,7 +460,7 @@ def test_multi_js_test_selector_normal(self):
@unittest.skip("Known broken. SERVER-48969 tracks re-enabling.")
def test_multi_js_test_selector_one_group(self):
- """Test we return only one group if the group size equals number of files"""
+ """Test we return only one group if the group size equals number of files."""
num_files = MockTestFileExplorer.NUM_JS_FILES
config = selector._MultiJSTestSelectorConfig(roots=["dir/**/*.js"], group_size=num_files,
group_count_multiplier=9999999)
diff --git a/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py b/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py
index 29ac90572c9e8..fe29b52b17c86 100644
--- a/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py
+++ b/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py
@@ -1,34 +1,15 @@
"""Unit tests for the resmokelib.testing.fixtures._builder module."""
# pylint: disable=protected-access,invalid-name
import unittest
-import filecmp
-import os
from unittest.mock import MagicMock
from buildscripts.resmokelib import logging, parser, config
from buildscripts.resmokelib.core import network
from buildscripts.resmokelib.testing.fixtures import _builder as under_test
-TEST_COMMIT = "1de5826097917875f48ca1ea4f2e53b40139f9ff"
-TEST_SUFFIX = "_unittest_suffix"
-TEST_RETRIEVE_DIR = os.path.join(under_test.RETRIEVE_DIR, TEST_SUFFIX)
SET_PARAMS = "set_parameters"
-class TestRetrieveFixtures(unittest.TestCase):
- """Class that test retrieve_fixtures methods."""
-
- def test_retrieve_fixtures(self):
- """function to test retrieve_fixtures"""
- expected_standalone = os.path.join("buildscripts", "tests", "resmokelib", "testing",
- "fixtures", "retrieved_fixture.txt")
- under_test.retrieve_fixtures(TEST_RETRIEVE_DIR, TEST_COMMIT)
- retrieved_standalone = os.path.join(TEST_RETRIEVE_DIR, "standalone.py")
- self.assertTrue(
- filecmp.cmpfiles(retrieved_standalone, expected_standalone,
- ["standalone.py", "retrieved_fixture.txt"], shallow=False))
-
-
class TestGetPackageName(unittest.TestCase):
def test_get_package_name_from_posix_path(self):
path = "build/multiversionfixtures/_unittest_suffix"
@@ -49,20 +30,12 @@ class TestBuildShardedCluster(unittest.TestCase):
@classmethod
def setUpClass(cls):
- under_test.retrieve_fixtures(TEST_RETRIEVE_DIR, TEST_COMMIT)
- cls.original_constants["MULTIVERSION_CLASS_SUFFIX"] = under_test.MULTIVERSION_CLASS_SUFFIX
- under_test.MULTIVERSION_CLASS_SUFFIX = TEST_SUFFIX
-
cls.mock_logger = MagicMock(spec=logging.Logger)
logging.loggers._FIXTURE_LOGGER_REGISTRY[cls.job_num] = cls.mock_logger
def tearDown(self):
network.PortAllocator.reset()
- @classmethod
- def tearDownClass(cls):
- under_test.MULTIVERSION_CLASS_SUFFIX = cls.original_constants["MULTIVERSION_CLASS_SUFFIX"]
-
def test_build_sharded_cluster_simple(self):
parser.set_run_options()
fixture_config = {"mongod_options": {SET_PARAMS: {"enableTestCommands": 1}}}
diff --git a/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py b/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py
index b492e74ce2ee8..6b9495c5207b5 100644
--- a/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py
+++ b/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py
@@ -1,4 +1,4 @@
-"""Unittest for the resmokelib.testing.fixturelib.utils module"""
+"""Unittest for the resmokelib.testing.fixturelib.utils module."""
import copy
import unittest
diff --git a/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py b/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py
index ea8ac1003ea86..28519e56391e1 100644
--- a/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py
+++ b/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py
@@ -15,6 +15,7 @@ def mock_resmoke_symbolizer_config():
config_mock.client_id = "client_id"
config_mock.client_secret = "client_secret"
config_mock.is_windows.return_value = False
+ config_mock.is_macos.return_value = False
return config_mock
@@ -85,6 +86,11 @@ def test_should_not_symbolize_if_on_windows(self):
ret = self.resmoke_symbolizer.should_symbolize(MagicMock())
self.assertFalse(ret)
+ def test_should_not_symbolize_if_on_macos(self):
+ self.config_mock.is_macos.return_value = True
+ ret = self.resmoke_symbolizer.should_symbolize(MagicMock())
+ self.assertFalse(ret)
+
def test_should_symbolize_return_true(self):
ret = self.resmoke_symbolizer.should_symbolize(MagicMock())
self.assertTrue(ret)
diff --git a/buildscripts/tests/resmokelib/utils/test_archival.py b/buildscripts/tests/resmokelib/utils/test_archival.py
index 96820b9bc3613..e6bee0b473a2f 100644
--- a/buildscripts/tests/resmokelib/utils/test_archival.py
+++ b/buildscripts/tests/resmokelib/utils/test_archival.py
@@ -1,4 +1,4 @@
-""" Unit tests for archival. """
+"""Unit tests for archival."""
import logging
import os
@@ -15,14 +15,14 @@
def create_random_file(file_name, num_chars_mb):
- """ Creates file with random characters, which will have minimal compression. """
+ """Creates file with random characters, which will have minimal compression."""
with open(file_name, "wb") as fileh:
for _ in range(num_chars_mb * 1024 * 1024):
fileh.write(chr(random.randint(0, 255)))
class MockS3Client(object):
- """ Class to mock the S3 client. """
+ """Class to mock the S3 client."""
def __init__(self, logger):
self.logger = logger
diff --git a/buildscripts/tests/resmokelib/utils/test_scheduler.py b/buildscripts/tests/resmokelib/utils/test_scheduler.py
index 48ffb77117af4..052a5e758064e 100644
--- a/buildscripts/tests/resmokelib/utils/test_scheduler.py
+++ b/buildscripts/tests/resmokelib/utils/test_scheduler.py
@@ -12,6 +12,7 @@ def noop():
class TestScheduler(unittest.TestCase):
"""Unit tests for the Scheduler class."""
+
scheduler = _scheduler.Scheduler
def setUp(self):
diff --git a/buildscripts/tests/test_burn_in_tests.py b/buildscripts/tests/test_burn_in_tests.py
index 5c358b5095391..7e10973ea4bcb 100644
--- a/buildscripts/tests/test_burn_in_tests.py
+++ b/buildscripts/tests/test_burn_in_tests.py
@@ -16,6 +16,7 @@
import buildscripts.burn_in_tests as under_test
from buildscripts.ciconfig.evergreen import parse_evergreen_file, VariantTask
import buildscripts.resmokelib.parser as _parser
+
_parser.set_run_options()
# pylint: disable=protected-access
diff --git a/buildscripts/tests/test_errorcodes.py b/buildscripts/tests/test_errorcodes.py
index 17c102454b5da..5f640b73d98ac 100644
--- a/buildscripts/tests/test_errorcodes.py
+++ b/buildscripts/tests/test_errorcodes.py
@@ -48,9 +48,13 @@ def test_generate_next_code(self):
self.assertEqual(22, next(generator))
def test_generate_next_server_code(self):
- """ This call to `read_error_codes` technically has no bearing on `get_next_code` when a
+ """
+ Test `generate_next_server_code`.
+
+ This call to `read_error_codes` technically has no bearing on `get_next_code` when a
`server_ticket` is passed in. But it maybe makes sense for the test to do so in case a
- future patch changes that relationship."""
+ future patch changes that relationship.
+ """
_, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + 'generate_next_server_code/')
print("Seen: " + str(seen))
generator = errorcodes.get_next_code(seen, server_ticket=12301)
diff --git a/buildscripts/tests/test_evergreen_activate_gen_tasks.py b/buildscripts/tests/test_evergreen_activate_gen_tasks.py
index 1632fab75eaeb..f1c85a9f2221d 100644
--- a/buildscripts/tests/test_evergreen_activate_gen_tasks.py
+++ b/buildscripts/tests/test_evergreen_activate_gen_tasks.py
@@ -17,17 +17,39 @@ def build_mock_task_list(num_tasks):
return [build_mock_task(f"task_{i}", f"id_{i}") for i in range(num_tasks)]
-def build_mock_evg_api(mock_tasks_list, variant_map_side_effects=None):
+class MockVariantData():
+ """An object to help create a mock evg api."""
+
+ def __init__(self, build_id, variant_name, task_list):
+ self.build_id = build_id
+ self.variant_name = variant_name
+ self.task_list = task_list
+
+
+def build_mock_evg_api(variant_data_list):
class VersionPatchedSpec(Version):
"""A patched `Version` with instance properties included for magic mock spec."""
+
build_variants_map = MagicMock()
mock_version = MagicMock(spec_set=VersionPatchedSpec)
- mock_version.build_variants_map.side_effect = variant_map_side_effects
+ mock_version.build_variants_map = {
+ variant_data.variant_name: variant_data.build_id
+ for variant_data in variant_data_list
+ }
mock_evg_api = MagicMock(spec_set=EvergreenApi)
mock_evg_api.version_by_id.return_value = mock_version
- mock_evg_api.tasks_by_build.side_effect = mock_tasks_list
+
+ build_id_mapping = {
+ variant_data.build_id: variant_data.task_list
+ for variant_data in variant_data_list
+ }
+
+ def tasks_by_build_side_effect(build_id):
+ return build_id_mapping[build_id]
+
+ mock_evg_api.tasks_by_build.side_effect = tasks_by_build_side_effect
return mock_evg_api
@@ -39,7 +61,8 @@ def test_task_with_display_name_is_activated(self):
"task_name": "task_3_gen",
})
mock_task_list = build_mock_task_list(5)
- mock_evg_api = build_mock_evg_api([mock_task_list])
+ mock_evg_api = build_mock_evg_api(
+ [MockVariantData("build_id", "non-burn-in-bv", mock_task_list)])
under_test.activate_task(expansions, mock_evg_api)
@@ -52,25 +75,27 @@ def test_task_with_no_matching_name(self):
"task_name": "not_an_existing_task",
})
mock_task_list = build_mock_task_list(5)
- mock_evg_api = build_mock_evg_api([mock_task_list])
+ mock_evg_api = build_mock_evg_api(
+ [MockVariantData("build_id", "non-burn-in-bv", mock_task_list)])
under_test.activate_task(expansions, mock_evg_api)
mock_evg_api.configure_task.assert_not_called()
def test_burn_in_tags_tasks_are_activated(self):
- expansions = under_test.EvgExpansions(
- **{
- "build_id": "build_id",
- "version_id": "version_id",
- "task_name": "burn_in_tags_gen",
- "burn_in_tag_buildvariants": "build_variant_2 build_variant_3",
- })
+ expansions = under_test.EvgExpansions(**{
+ "build_id": "build_id",
+ "version_id": "version_id",
+ "task_name": "burn_in_tags_gen",
+ })
mock_task_list_2 = build_mock_task_list(5)
mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2"))
mock_task_list_3 = build_mock_task_list(5)
mock_task_list_3.append(build_mock_task("burn_in_tests", "burn_in_tests_id_3"))
- mock_evg_api = build_mock_evg_api([mock_task_list_2, mock_task_list_3])
+ mock_evg_api = build_mock_evg_api([
+ MockVariantData("1", "variant1-generated-by-burn-in-tags", mock_task_list_2),
+ MockVariantData("2", "variant2-generated-by-burn-in-tags", mock_task_list_3)
+ ])
under_test.activate_task(expansions, mock_evg_api)
@@ -80,25 +105,6 @@ def test_burn_in_tags_tasks_are_activated(self):
])
def test_burn_in_tags_task_skips_non_existing_build_variant(self):
- expansions = under_test.EvgExpansions(
- **{
- "build_id": "build_id",
- "version_id": "version_id",
- "task_name": "burn_in_tags_gen",
- "burn_in_tag_buildvariants": "not_an_existing_build_variant build_variant_2",
- })
- mock_task_list_1 = build_mock_task_list(5)
- mock_task_list_1.append(build_mock_task("burn_in_tags_gen", "burn_in_tags_gen_id_1"))
- mock_task_list_2 = build_mock_task_list(5)
- mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2"))
- mock_evg_api = build_mock_evg_api([mock_task_list_1, mock_task_list_2],
- [None, KeyError, None])
-
- under_test.activate_task(expansions, mock_evg_api)
-
- mock_evg_api.configure_task.assert_called_once_with("burn_in_tests_id_2", activated=True)
-
- def test_burn_in_tags_task_with_missing_burn_in_tag_buildvariants_expansion(self):
expansions = under_test.EvgExpansions(**{
"build_id": "build_id",
"version_id": "version_id",
@@ -106,8 +112,13 @@ def test_burn_in_tags_task_with_missing_burn_in_tag_buildvariants_expansion(self
})
mock_task_list_1 = build_mock_task_list(5)
mock_task_list_1.append(build_mock_task("burn_in_tags_gen", "burn_in_tags_gen_id_1"))
- mock_evg_api = build_mock_evg_api(mock_task_list_1)
+ mock_task_list_2 = build_mock_task_list(5)
+ mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2"))
+ mock_evg_api = build_mock_evg_api([
+ MockVariantData("1", "variant1-non-burn-in", mock_task_list_1),
+ MockVariantData("2", "variant2-generated-by-burn-in-tags", mock_task_list_2)
+ ])
under_test.activate_task(expansions, mock_evg_api)
- mock_evg_api.configure_task.assert_not_called()
+ mock_evg_api.configure_task.assert_called_once_with("burn_in_tests_id_2", activated=True)
diff --git a/buildscripts/tests/test_evergreen_task_timeout.py b/buildscripts/tests/test_evergreen_task_timeout.py
index 4e681e47f529e..72c6c5f8cc1a5 100644
--- a/buildscripts/tests/test_evergreen_task_timeout.py
+++ b/buildscripts/tests/test_evergreen_task_timeout.py
@@ -110,7 +110,7 @@ def test_looking_up_an_idle_override_should_work(self):
class TestDetermineExecTimeout(unittest.TestCase):
def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, evg_alias,
- build_variant, timeout_override, expected_timeout):
+ build_variant, display_name, timeout_override, expected_timeout):
task_name = "task_name"
variant = build_variant
overrides = {}
@@ -121,8 +121,9 @@ def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, e
orchestrator = under_test.TaskTimeoutOrchestrator(
timeout_service=MagicMock(spec_set=TimeoutService),
- timeout_overrides=mock_timeout_overrides,
- evg_project_config=MagicMock(spec_set=EvergreenProjectConfig))
+ timeout_overrides=mock_timeout_overrides, evg_project_config=MagicMock(
+ spec_set=EvergreenProjectConfig,
+ get_variant=MagicMock(return_value=MagicMock(display_name=display_name))))
actual_timeout = orchestrator.determine_exec_timeout(
task_name, variant, idle_timeout, exec_timeout, evg_alias, historic_timeout)
@@ -132,78 +133,83 @@ def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, e
def test_timeout_used_if_specified(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=42),
historic_timeout=None, evg_alias=None, build_variant="variant",
- timeout_override=None, expected_timeout=timedelta(seconds=42))
+ display_name="not required", timeout_override=None,
+ expected_timeout=timedelta(seconds=42))
def test_default_is_returned_with_no_timeout(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None,
- evg_alias=None, build_variant="variant", timeout_override=None,
+ evg_alias=None, build_variant="variant",
+ display_name="not required", timeout_override=None,
expected_timeout=under_test.DEFAULT_NON_REQUIRED_BUILD_TIMEOUT)
def test_default_is_returned_with_timeout_at_zero(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=0),
historic_timeout=None, evg_alias=None, build_variant="variant",
- timeout_override=None,
+ display_name="not required", timeout_override=None,
expected_timeout=under_test.DEFAULT_NON_REQUIRED_BUILD_TIMEOUT)
def test_default_required_returned_on_required_variants(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None,
evg_alias=None, build_variant="variant-required",
- timeout_override=None,
+ display_name="! required", timeout_override=None,
expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT)
def test_override_on_required_should_use_override(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None,
evg_alias=None, build_variant="variant-required",
- timeout_override=3 * 60,
+ display_name="! required", timeout_override=3 * 60,
expected_timeout=timedelta(minutes=3 * 60))
def test_task_specific_timeout(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=0),
historic_timeout=None, evg_alias=None, build_variant="variant",
- timeout_override=60, expected_timeout=timedelta(minutes=60))
+ display_name="not required", timeout_override=60,
+ expected_timeout=timedelta(minutes=60))
def test_commit_queue_items_use_commit_queue_timeout(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None,
evg_alias=under_test.COMMIT_QUEUE_ALIAS,
- build_variant="variant", timeout_override=None,
+ build_variant="variant", display_name="not required",
+ timeout_override=None,
expected_timeout=under_test.COMMIT_QUEUE_TIMEOUT)
def test_use_idle_timeout_if_greater_than_exec_timeout(self):
self._validate_exec_timeout(
idle_timeout=timedelta(hours=2), exec_timeout=timedelta(minutes=10),
- historic_timeout=None, evg_alias=None, build_variant="variant", timeout_override=None,
- expected_timeout=timedelta(hours=2))
+ historic_timeout=None, evg_alias=None, build_variant="variant",
+ display_name="not required", timeout_override=None, expected_timeout=timedelta(hours=2))
def test_historic_timeout_should_be_used_if_given(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None,
historic_timeout=timedelta(minutes=15), evg_alias=None,
- build_variant="variant", timeout_override=None,
- expected_timeout=timedelta(minutes=15))
+ build_variant="variant", display_name="not required",
+ timeout_override=None, expected_timeout=timedelta(minutes=15))
def test_commit_queue_should_override_historic_timeouts(self):
self._validate_exec_timeout(
idle_timeout=None, exec_timeout=None, historic_timeout=timedelta(minutes=15),
- evg_alias=under_test.COMMIT_QUEUE_ALIAS, build_variant="variant", timeout_override=None,
+ evg_alias=under_test.COMMIT_QUEUE_ALIAS, build_variant="variant",
+ display_name="not required", timeout_override=None,
expected_timeout=under_test.COMMIT_QUEUE_TIMEOUT)
def test_override_should_override_historic_timeouts(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None,
historic_timeout=timedelta(minutes=15), evg_alias=None,
- build_variant="variant", timeout_override=33,
- expected_timeout=timedelta(minutes=33))
+ build_variant="variant", display_name="not required",
+ timeout_override=33, expected_timeout=timedelta(minutes=33))
def test_historic_timeout_should_not_be_overridden_by_required_bv(self):
self._validate_exec_timeout(idle_timeout=None, exec_timeout=None,
historic_timeout=timedelta(minutes=15), evg_alias=None,
- build_variant="variant-required", timeout_override=None,
- expected_timeout=timedelta(minutes=15))
+ build_variant="variant-required", display_name="! required",
+ timeout_override=None, expected_timeout=timedelta(minutes=15))
def test_historic_timeout_should_not_be_increase_required_bv_timeout(self):
self._validate_exec_timeout(
idle_timeout=None, exec_timeout=None,
historic_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT + timedelta(minutes=30),
- evg_alias=None, build_variant="variant-required", timeout_override=None,
- expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT)
+ evg_alias=None, build_variant="variant-required", display_name="! required",
+ timeout_override=None, expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT)
class TestDetermineIdleTimeout(unittest.TestCase):
diff --git a/buildscripts/tests/test_packager.py b/buildscripts/tests/test_packager.py
index 022c14c6c78c3..975a85dee3d80 100644
--- a/buildscripts/tests/test_packager.py
+++ b/buildscripts/tests/test_packager.py
@@ -6,14 +6,15 @@
class TestPackager(TestCase):
- """Test packager.py"""
+ """Test packager.py."""
def test_is_nightly(self) -> None:
"""Test is_nightly."""
@dataclass
class Case:
- """Test case data"""
+ """Test case data."""
+
name: str
version: str
want: bool
diff --git a/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py
index 2033b2461fab1..5a8c05c9128ac 100644
--- a/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py
+++ b/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py
@@ -5,8 +5,7 @@
import unittest
from unittest.mock import patch
from mock import MagicMock
-from mongo_tooling_metrics import client
-from mongo_tooling_metrics.base_metrics import TopLevelMetrics
+from mongo_tooling_metrics.lib.top_level_metrics import NinjaToolingMetrics
import ninja as under_test
BUILD_DIR = os.path.join(os.getcwd(), 'build')
@@ -34,9 +33,8 @@
"fast.ninja",
"install-platform",
])
-@patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True))
class TestNinjaAtExitMetricsCollection(unittest.TestCase):
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
+ @patch.object(NinjaToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True))
@patch.object(atexit, "register", MagicMock())
def test_at_exit_metrics_collection(self):
with self.assertRaises(SystemExit) as _:
@@ -44,11 +42,10 @@ def test_at_exit_metrics_collection(self):
atexit_functions = [
call for call in atexit.register.call_args_list
- if call[0][0].__name__ == '_verbosity_enforced_save_metrics'
+ if call[0][0].__name__ == '_safe_save_metrics'
]
- generate_metrics = atexit_functions[0][0][1].generate_metrics
kwargs = atexit_functions[0][1]
- metrics = generate_metrics(**kwargs)
+ metrics = NinjaToolingMetrics.generate_metrics(**kwargs)
assert not metrics.is_malformed()
assert len(metrics.build_info.build_artifacts) > 0
@@ -64,10 +61,10 @@ def test_at_exit_metrics_collection(self):
assert metrics.command_info.options['f'] == "fast.ninja"
assert metrics.command_info.options['j'] == "400"
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False))
+ @patch.object(NinjaToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False))
@patch.object(atexit, "register", MagicMock())
def test_no_at_exit_metrics_collection(self):
with self.assertRaises(SystemExit) as _:
under_test.ninja()
atexit_functions = [call[0][0].__name__ for call in atexit.register.call_args_list]
- assert "_verbosity_enforced_save_metrics" not in atexit_functions
+ assert "_safe_save_metrics" not in atexit_functions
diff --git a/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py
index 1073e1a76a6f2..be7a34b79e639 100644
--- a/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py
+++ b/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py
@@ -2,8 +2,8 @@
import unittest
from unittest.mock import patch
from mock import MagicMock
-from mongo_tooling_metrics import client
-from mongo_tooling_metrics.base_metrics import TopLevelMetrics
+from mongo_tooling_metrics.lib.top_level_metrics import ResmokeToolingMetrics
+import mongo_tooling_metrics.lib.utils as metrics_utils
import buildscripts.resmoke as under_test
@@ -16,18 +16,16 @@
@patch("atexit.register")
class TestResmokeAtExitMetricsCollection(unittest.TestCase):
@patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites'])
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
- @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True))
+ @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True))
def test_resmoke_at_exit_metrics_collection(self, mock_atexit_register):
under_test.entrypoint()
atexit_functions = [
call for call in mock_atexit_register.call_args_list
- if call[0][0].__name__ == '_verbosity_enforced_save_metrics'
+ if call[0][0].__name__ == '_safe_save_metrics'
]
- generate_metrics = atexit_functions[0][0][1].generate_metrics
kwargs = atexit_functions[0][1]
- metrics = generate_metrics(**kwargs)
+ metrics = ResmokeToolingMetrics.generate_metrics(**kwargs)
assert not metrics.is_malformed()
assert metrics.command_info.command == ['buildscripts/resmoke.py', 'list-suites']
@@ -35,24 +33,21 @@ def test_resmoke_at_exit_metrics_collection(self, mock_atexit_register):
assert metrics.command_info.positional_args == []
@patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites'])
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
- @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=False))
+ @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False))
def test_no_resmoke_at_exit_metrics_collection(self, mock_atexit_register):
under_test.entrypoint()
atexit_functions = [call[0][0].__name__ for call in mock_atexit_register.call_args_list]
- assert "_verbosity_enforced_save_metrics" not in atexit_functions
+ assert "_safe_save_metrics" not in atexit_functions
@patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites'])
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False))
- @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True))
+ @patch.object(metrics_utils, '_is_virtual_workstation', MagicMock(return_value=False))
def test_resmoke_no_metric_collection_non_vw(self, mock_atexit_register):
under_test.entrypoint()
atexit_functions = [call[0][0].__name__ for call in mock_atexit_register.call_args_list]
- assert "_verbosity_enforced_save_metrics" not in atexit_functions
+ assert "_safe_save_metrics" not in atexit_functions
@patch("sys.argv", ['buildscripts/resmoke.py', 'run', '--suite', 'buildscripts_test'])
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
- @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True))
+ @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True))
@patch("buildscripts.resmokelib.testing.executor.TestSuiteExecutor._run_tests",
side_effect=Exception())
def test_resmoke_at_exit_metrics_collection_exc(self, mock_exc_method, mock_atexit_register):
@@ -61,11 +56,10 @@ def test_resmoke_at_exit_metrics_collection_exc(self, mock_exc_method, mock_atex
atexit_functions = [
call for call in mock_atexit_register.call_args_list
- if call[0][0].__name__ == '_verbosity_enforced_save_metrics'
+ if call[0][0].__name__ == '_safe_save_metrics'
]
- generate_metrics = atexit_functions[0][0][1].generate_metrics
kwargs = atexit_functions[0][1]
- metrics = generate_metrics(**kwargs)
+ metrics = ResmokeToolingMetrics.generate_metrics(**kwargs)
assert not metrics.is_malformed()
assert metrics.command_info.command == [
diff --git a/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py
index 9d103e6e5f5ae..fef7c13f9c9db 100644
--- a/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py
+++ b/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py
@@ -4,8 +4,7 @@
import unittest
from unittest.mock import patch
from mock import MagicMock
-from mongo_tooling_metrics import client
-from mongo_tooling_metrics.base_metrics import TopLevelMetrics
+from mongo_tooling_metrics.lib.top_level_metrics import SConsToolingMetrics
import buildscripts.scons as under_test
BUILD_DIR = os.path.join(os.getcwd(), 'build')
@@ -27,9 +26,8 @@
@patch("sys.argv", ARGS)
-@patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True))
class TestSconsAtExitMetricsCollection(unittest.TestCase):
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
+ @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True))
@patch.object(atexit, "register", MagicMock())
def test_at_exit_metrics_collection(self):
with self.assertRaises(SystemExit) as _:
@@ -37,11 +35,10 @@ def test_at_exit_metrics_collection(self):
atexit_functions = [
call for call in atexit.register.call_args_list
- if call[0][0].__name__ == '_verbosity_enforced_save_metrics'
+ if call[0][0].__name__ == '_safe_save_metrics'
]
- generate_metrics = atexit_functions[0][0][1].generate_metrics
kwargs = atexit_functions[0][1]
- metrics = generate_metrics(**kwargs)
+ metrics = SConsToolingMetrics.generate_metrics(**kwargs)
assert not metrics.is_malformed()
assert len(metrics.build_info.build_artifacts) > 0
@@ -50,15 +47,15 @@ def test_at_exit_metrics_collection(self):
f"VARIANT_DIR={VARIANT_DIR}", "install-platform"
]
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False))
+ @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False))
@patch.object(atexit, "register", MagicMock())
def test_no_at_exit_metrics_collection(self):
with self.assertRaises(SystemExit) as _:
under_test.entrypoint()
atexit_functions = [call[0][0].__name__ for call in atexit.register.call_args_list]
- assert "_verbosity_enforced_save_metrics" not in atexit_functions
+ assert "_safe_save_metrics" not in atexit_functions
- @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True))
+ @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True))
@patch("buildscripts.moduleconfig.get_module_sconscripts", MagicMock(side_effect=Exception()))
@patch.object(atexit, "register", MagicMock())
def test_at_exit_metrics_collection_exc(self):
@@ -67,11 +64,10 @@ def test_at_exit_metrics_collection_exc(self):
atexit_functions = [
call for call in atexit.register.call_args_list
- if call[0][0].__name__ == '_verbosity_enforced_save_metrics'
+ if call[0][0].__name__ == '_safe_save_metrics'
]
- generate_metrics = atexit_functions[0][0][1].generate_metrics
kwargs = atexit_functions[0][1]
- metrics = generate_metrics(**kwargs)
+ metrics = SConsToolingMetrics.generate_metrics(**kwargs)
assert not metrics.is_malformed()
assert metrics.command_info.command == ARGS
diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py
index 55b5da308bea6..191e4b0b0208b 100644
--- a/buildscripts/tests/util/test_teststats.py
+++ b/buildscripts/tests/util/test_teststats.py
@@ -155,12 +155,14 @@ def test_get_stats_from_s3_returns_data(self, mock_get):
"num_pass": 74,
"num_fail": 0,
"avg_duration_pass": 23.16216216216216,
+ "max_duration_pass": 27.123,
},
{
"test_name": "shell_advance_cluster_time:ValidateCollections",
"num_pass": 74,
"num_fail": 0,
"avg_duration_pass": 1.662162162162162,
+ "max_duration_pass": 100.0987,
},
]
mock_get.return_value = mock_response
@@ -173,12 +175,14 @@ def test_get_stats_from_s3_returns_data(self, mock_get):
num_pass=74,
num_fail=0,
avg_duration_pass=23.16216216216216,
+ max_duration_pass=27.123,
),
under_test.HistoricalTestInformation(
test_name="shell_advance_cluster_time:ValidateCollections",
num_pass=74,
num_fail=0,
avg_duration_pass=1.662162162162162,
+ max_duration_pass=100.0987,
),
])
diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py
index c0934f2582f32..11e2e605b9cba 100644
--- a/buildscripts/util/teststats.py
+++ b/buildscripts/util/teststats.py
@@ -21,12 +21,14 @@ class HistoricalTestInformation(NamedTuple):
avg_duration_pass: Average of runtime of test that passed.
num_pass: Number of times the test has passed.
num_fail: Number of times the test has failed.
+ max_duration_pass: Maximum runtime of the test when it passed.
"""
test_name: str
num_pass: int
num_fail: int
avg_duration_pass: float
+ max_duration_pass: Optional[float] = None
class TestRuntime(NamedTuple):
diff --git a/distsrc/LICENSE.OpenSSL b/distsrc/LICENSE.OpenSSL
index 84300a4257671..b1ac5b7c7cf2d 100644
--- a/distsrc/LICENSE.OpenSSL
+++ b/distsrc/LICENSE.OpenSSL
@@ -113,4 +113,4 @@ Original SSLeay License
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
- */
\ No newline at end of file
+ */
diff --git a/distsrc/THIRD-PARTY-NOTICES b/distsrc/THIRD-PARTY-NOTICES
index 5f47c9cb329f9..8c46b92676fce 100644
--- a/distsrc/THIRD-PARTY-NOTICES
+++ b/distsrc/THIRD-PARTY-NOTICES
@@ -1495,32 +1495,6 @@ all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
-SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
-FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
-
- 24) License notice for MPark.Variant
--------------------------------------
-Boost Software License - Version 1.0 - August 17th, 2003
-
-Permission is hereby granted, free of charge, to any person or organization
-obtaining a copy of the software and accompanying documentation covered by
-this license (the "Software") to use, reproduce, display, distribute,
-execute, and transmit the Software, and to prepare derivative works of the
-Software, and to permit third-parties to whom the Software is furnished to
-do so, all subject to the following:
-
-The copyright notices in the Software and this entire statement, including
-the above license grant, this restriction and the following disclaimer,
-must be included in all copies of the Software, in whole or in part, and
-all derivative works of the Software, unless such copies or derivative
-works are solely in the form of machine-executable object code generated by
-a source language processor.
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
@@ -2441,4 +2415,4 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-END
\ No newline at end of file
+END
diff --git a/docs/building.md b/docs/building.md
index 63fbde131593a..9cc95c2d77bc9 100644
--- a/docs/building.md
+++ b/docs/building.md
@@ -7,7 +7,7 @@ way to get started, rather than building from source.
To build MongoDB, you will need:
-* A modern C++ compiler capable of compiling C++17. One of the following is required:
+* A modern C++ compiler capable of compiling C++20. One of the following is required:
* GCC 11.3 or newer
* Clang 12.0 (or Apple XCode 13.0 Clang) or newer
* Visual Studio 2022 version 17.0 or newer (See Windows section below for details)
diff --git a/docs/command_dispatch.md b/docs/command_dispatch.md
index eb8ff324f6be1..4b2391afbd241 100644
--- a/docs/command_dispatch.md
+++ b/docs/command_dispatch.md
@@ -82,4 +82,4 @@ For details on transport internals, including ingress networking, see [this docu
[transaction_router_h]: ../src/mongo/s/transaction_router.h
[commands_h]: ../src/mongo/db/commands.h
[template_method_pattern]: https://en.wikipedia.org/wiki/Template_method_pattern
-[transport_internals]: ../src/mongo/transport/README.md
\ No newline at end of file
+[transport_internals]: ../src/mongo/transport/README.md
diff --git a/docs/egress_networking.md b/docs/egress_networking.md
index cd5d4c098f41c..65108b479f7b6 100644
--- a/docs/egress_networking.md
+++ b/docs/egress_networking.md
@@ -38,4 +38,4 @@ document][transport_internals].
[network_interface_h]: ../src/mongo/executor/network_interface.h
[dbclient_rs_h]: ../src/mongo/client/dbclient_rs.h
[automatic_failover]: https://docs.mongodb.com/manual/replication/#automatic-failover
-[transport_internals]: ../src/mongo/transport/README.md
\ No newline at end of file
+[transport_internals]: ../src/mongo/transport/README.md
diff --git a/docs/evergreen-testing/burn_in_tags.md b/docs/evergreen-testing/burn_in_tags.md
index 579f432efc43f..9600645a1fcba 100644
--- a/docs/evergreen-testing/burn_in_tags.md
+++ b/docs/evergreen-testing/burn_in_tags.md
@@ -17,3 +17,11 @@ will be generated, each of which will have a `burn_in_tests` task generated by t
[generated task](task_generation.md), may have multiple sub-tasks which run the test suites only for the
new or changed javascript tests (note that a javascript test can be included in multiple test suites). Each of
those tests will be run 2 times minimum, and 1000 times maximum or for 10 minutes, whichever is reached first.
+
+## ! Run All Affected JStests
+The `! Run All Affected JStests` variant has a single `burn_in_tags_gen` task. This task creates and
+activates [`burn_in_tests`](burn_in_tests.md) tasks for all required and suggested
+variants. The end result is that any jstests that have been modified in the patch will
+run on all required and suggested variants. This should give users a clear signal on
+whether their jstest changes have introduced a failure that could potentially lead
+to a revert or a follow-up bug fix commit.
diff --git a/docs/evergreen-testing/multiversion.md b/docs/evergreen-testing/multiversion.md
index 3498b696aa6ea..4bda73b7081d0 100644
--- a/docs/evergreen-testing/multiversion.md
+++ b/docs/evergreen-testing/multiversion.md
@@ -51,9 +51,9 @@ versions. In such context we refer to `last-lts` and `last-continuous` versions
version and to `latest` as a `new` version.
A `new` version is compiled in the same way as for non-multiversion tasks. The `old` versions of
-compiled binaries are downloaded from the old branch projects with the following script:
-[evergreen/multiversion_setup.sh](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/evergreen/multiversion_setup.sh).
-The script searches for the latest available compiled binaries on the old branch projects in
+compiled binaries are downloaded from the old branch projects with
+[`db-contrib-tool`](https://github.com/10gen/db-contrib-tool).
+`db-contrib-tool` searches for the latest available compiled binaries on the old branch projects in
Evergreen.
@@ -64,17 +64,19 @@ Multiversion suites can be explicit and implicit.
* Explicit - JS tests are aware of the binary versions they are running,
e.g. [multiversion.yml](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/buildscripts/resmokeconfig/suites/multiversion.yml).
The version of binaries is explicitly set in JS tests,
-e.g. [jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js#L33-L42):
+e.g. [jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js](https://github.com/mongodb/mongo/blob/397c8da541940b3fbe6257243f97a342fe7e0d3b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js#L33-L44):
```js
const versions = [
- {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'},
- {binVersion: '4.2', featureCompatibilityVersion: '4.2', testCollection: 'four_two'},
{binVersion: '4.4', featureCompatibilityVersion: '4.4', testCollection: 'four_four'},
{binVersion: '5.0', featureCompatibilityVersion: '5.0', testCollection: 'five_zero'},
{binVersion: '6.0', featureCompatibilityVersion: '6.0', testCollection: 'six_zero'},
- {binVersion: 'last-lts', testCollection: 'last_lts'},
- {binVersion: 'last-continuous', testCollection: 'last_continuous'},
+ {binVersion: 'last-lts', featureCompatibilityVersion: lastLTSFCV, testCollection: 'last_lts'},
+ {
+ binVersion: 'last-continuous',
+ featureCompatibilityVersion: lastContinuousFCV,
+ testCollection: 'last_continuous'
+ },
{binVersion: 'latest', featureCompatibilityVersion: latestFCV, testCollection: 'latest'},
];
```
@@ -151,7 +153,7 @@ of shell fixture configuration override:
In implicit multiversion suites the same set of tests may run in similar suites that are using
various mixed version combinations. Those version combinations depend on the type of resmoke
-fixture the suite is running with:
+fixture the suite is running with. These are the recommended version combinations to test against based on the suite fixtures:
* Replica set fixture combinations:
* `last-lts new-new-old` (i.e. suite runs the replica set fixture that spins up the `latest` and
@@ -162,6 +164,13 @@ the `last-lts` versions in a 3-node replica set where the 1st node is the `lates
* `last-continuous new-new-old`
* `last-continuous new-old-new`
* `last-continuous old-new-new`
+ * Ex: [change_streams](https://github.com/10gen/mongo/blob/88d59bfe9d5ee2c9938ae251f7a77a8bf1250a6b/buildscripts/resmokeconfig/suites/change_streams.yml) uses a [`ReplicaSetFixture`](https://github.com/10gen/mongo/blob/88d59bfe9d5ee2c9938ae251f7a77a8bf1250a6b/buildscripts/resmokeconfig/suites/change_streams.yml#L50) so the corresponding multiversion suites are
+ * [`change_streams_last_continuous_new_new_old`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_new_new_old.yml)
+ * [`change_streams_last_continuous_new_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_new_old_new.yml)
+ * [`change_streams_last_continuous_old_new_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_old_new_new.yml)
+ * [`change_streams_last_lts_new_new_old`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_new_new_old.yml)
+ * [`change_streams_last_lts_new_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_new_old_new.yml)
+ * [`change_streams_last_lts_old_new_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_old_new_new.yml)
* Sharded cluster fixture combinations:
* `last-lts new-old-old-new` (i.e. suite runs the sharded cluster fixture that spins up the
@@ -169,14 +178,21 @@ the `last-lts` versions in a 3-node replica set where the 1st node is the `lates
replica sets per shard where the 1st node of the 1st shard is the `latest`, 2nd node of 1st
shard - `last-lts`, 1st node of 2nd shard - `last-lts`, 2nd node of 2nd shard - `latest`, etc.)
* `last-continuous new-old-old-new`
+ * Ex: [change_streams_downgrade](https://github.com/10gen/mongo/blob/a96b83b2fa7010a5823fefac2469b4a06a697cf1/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml) uses a [`ShardedClusterFixture`](https://github.com/10gen/mongo/blob/a96b83b2fa7010a5823fefac2469b4a06a697cf1/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml#L408) so the corresponding multiversion suites are
+ * [`change_streams_downgrade_last_continuous_new_old_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_downgrade_last_continuous_new_old_old_new.yml)
+ * [`change_streams_downgrade_last_lts_new_old_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_downgrade_last_lts_new_old_old_new.yml)
* Shell fixture combinations:
* `last-lts` (i.e. suite runs the shell fixture that spins up `last-lts` as the `old` versions,
etc.)
* `last-continuous`
+ * Ex: [initial_sync_fuzzer](https://github.com/10gen/mongo/blob/908625ffdec050a71aa2ce47c35788739f629c60/buildscripts/resmokeconfig/suites/initial_sync_fuzzer.yml) uses a Shell Fixture, so the corresponding multiversion suites are
+ * [`initial_sync_fuzzer_last_lts`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/initial_sync_fuzzer_last_lts.yml)
+ * [`initial_sync_fuzzer_last_continuous`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/initial_sync_fuzzer_last_continuous.yml)
-If `last-lts` and `last-continuous` versions happen to be the same, we skip `last-continuous` and
-run multiversion suites with only `last-lts` combinations in Evergreen.
+
+If `last-lts` and `last-continuous` versions happen to be the same, or `last-continuous` is EOL, we skip `last-continuous`
+and run multiversion suites with only `last-lts` combinations in Evergreen.
## Working with multiversion tasks in Evergreen
@@ -212,6 +228,13 @@ below the specified FCV version, e.g. when the `latest` version is `6.2`, `last-
tasks that run `latest` with `last-lts`, but will run in multiversion tasks that run `latest` with
`last-continuous`.
+In addition to disabling multiversion tests based on FCV, we do not run tests for in-development feature flags
+(`featureFlagXYZ` flags that have `default: false`) because these tests will most likely fail on older versions that
+have not implemented the feature. For multiversion tasks, we pass the `--runNoFeatureFlagTests` flag to avoid these
+failures on `all feature flag` variants.
+
+For more info on FCV, take a look at [FCV_AND_FEATURE_FLAG_README.md](https://github.com/10gen/mongo/blob/master/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md).
+
Another common case could be that the changes on master branch are breaking multiversion tests,
but with those changes backported to the older branches the multiversion tests should work.
In order to temporarily disable the test from running in multiversion it can be added to the
diff --git a/docs/exception_architecture.md b/docs/exception_architecture.md
index 3e5203f60befb..05c6eb752a1d7 100644
--- a/docs/exception_architecture.md
+++ b/docs/exception_architecture.md
@@ -19,7 +19,7 @@ __Note__: Calling C function `assert` is not allowed. Use one of the above inste
The following types of assertions are deprecated:
-- `verify`
+- `MONGO_verify`
- Checks per-operation invariants. A synonym for massert but doesn't require an error code.
Process fatal in debug mode. Do not use for new code; use invariant or fassert instead.
- `dassert`
@@ -39,7 +39,7 @@ Some assertions will increment an assertion counter. The `serverStatus` command
"asserts" section including these counters:
- `regular`
- - Incremented by `verify`.
+ - Incremented by `MONGO_verify`.
- `warning`
- Always 0. Nothing increments this anymore.
- `msg`
@@ -55,7 +55,7 @@ Some assertions will increment an assertion counter. The `serverStatus` command
## Considerations
When per-operation invariant checks fail, the current operation fails, but the process and
-connection persist. This means that `massert`, `uassert`, `iassert` and `verify` only
+connection persist. This means that `massert`, `uassert`, `iassert` and `MONGO_verify` only
terminate the current operation, not the whole process. Be careful not to corrupt process state by
mistakenly using these assertions midway through mutating process state.
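+
+As a minimal sketch (not taken from the server code; the function and the
+numeric error codes are placeholders), doing all `uassert` checks before
+mutating any state keeps a failed per-operation check from leaving that
+state half-updated:
+
+``` cpp
+#include <map>
+#include <string>
+
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+
+void renameItem(std::map<std::string, int>& registry,
+                const std::string& from,
+                const std::string& to) {
+    // Validate first: a failed uassert aborts only this operation, so the
+    // registry must still be consistent afterwards.
+    uassert(9999901, "source item does not exist", registry.count(from) == 1);
+    uassert(9999902, "destination item already exists", registry.count(to) == 0);
+
+    // Mutate only after every check has passed.
+    registry[to] = registry[from];
+    registry.erase(from);
+}
+
+}  // namespace mongo
+```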
diff --git a/docs/images/shard_merge_diagram.png b/docs/images/shard_merge_diagram.png
new file mode 100644
index 0000000000000..0578f7a39499d
Binary files /dev/null and b/docs/images/shard_merge_diagram.png differ
diff --git a/docs/libfuzzer.md b/docs/libfuzzer.md
new file mode 100644
index 0000000000000..3ab836e641d18
--- /dev/null
+++ b/docs/libfuzzer.md
@@ -0,0 +1,87 @@
+---
+title: LibFuzzer
+---
+
+LibFuzzer is a tool for performing coverage guided fuzzing of C/C++
+code. LibFuzzer will try to trigger AUBSAN failures in a function you
+provide, by repeatedly calling it with a carefully crafted byte array as
+input. Each input will be assigned a "score". Byte arrays which exercise
+new or more regions of code will score better. LibFuzzer will merge and
+mutate high scoring inputs in order to gradually cover more and more
+possible behavior.
+
+# When to use LibFuzzer
+
+LibFuzzer is great for testing functions which accept an opaque blob of
+untrusted user-provided data.
+
+# How to use LibFuzzer
+
+LibFuzzer implements `int main`, and expects to be linked with an object
+file which provides the function under test. You will achieve this by
+writing a cpp file which implements
+
+``` cpp
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ // Your code here
+}
+```
+
+`LLVMFuzzerTestOneInput` will be called repeatedly, with fuzzer-generated
+bytes in `Data`. `Size` will always truthfully tell your
+implementation how many bytes are in `Data`. If your function crashes or
+induces an AUBSAN fault, LibFuzzer will consider that to be a finding
+worth reporting.
+
+Keep in mind that your function will often "just" be adapting `Data` to
+whatever format our internal C++ functions require. However, you have a
+lot of freedom in exactly what you choose to do. Just make sure your
+function crashes or triggers an invariant failure when something
+interesting happens! A few ideas, with a sketch after the list:
+
+- You might choose to call multiple implementations of a single
+ operation, and validate that they produce the same output when
+ presented the same input.
+- You could tease out individual bytes from `Data` and provide them as
+ different arguments to the function under test.
+
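+As one illustrative sketch (the `countCommas*` helpers are hypothetical,
+not real server functions), a fuzzer that feeds the same bytes to two
+implementations and aborts when they disagree might look like this:
+
+``` cpp
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+// Two hypothetical implementations of the same operation.
+static std::size_t countCommasSimple(const std::string& s) {
+    std::size_t n = 0;
+    for (char c : s) {
+        if (c == ',')
+            ++n;
+    }
+    return n;
+}
+
+static std::size_t countCommasFast(const std::string& s) {
+    std::size_t n = 0;
+    const char* p = s.data();
+    const char* end = p + s.size();
+    while ((p = static_cast<const char*>(std::memchr(p, ',', end - p))) != nullptr) {
+        ++n;
+        ++p;
+    }
+    return n;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+    // Adapt the raw bytes into whatever the functions under test expect.
+    std::string input(reinterpret_cast<const char*>(Data), Size);
+
+    // Differential check: the implementations must agree on every input.
+    // Crashing here is what turns a disagreement into a LibFuzzer finding.
+    if (countCommasSimple(input) != countCommasFast(input)) {
+        std::abort();
+    }
+    return 0;
+}
+```
+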
+Finally, your cpp file will need a SCons target. There is a method which
+defines fuzzer targets, much like how we define unittests. For example:
+
+``` python
+ env.CppLibfuzzerTest(
+ target='op_msg_fuzzer',
+ source=[
+ 'op_msg_fuzzer.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ 'op_msg_fuzzer_fixture',
+ ],
+)
+```
+
+# Running LibFuzzer
+
+Your test's object file and **all** of its dependencies must be compiled
+with the "fuzzer" sanitizer, plus a set of sanitizers which might
+produce interesting runtime errors like AUBSAN. Evergreen has a build
+variant whose name includes the string "FUZZER"; it compiles and runs
+all of the fuzzer tests.
+
+The fuzzers can be built locally, for development and debugging. Check
+our Evergreen configuration for the current SCons arguments.
+
+LibFuzzer binaries accept a path to a directory containing their
+"corpus". A corpus is a set of inputs known to produce interesting
+behavior. LibFuzzer will start producing interesting results more
+quickly if it starts off with a set of inputs which it can begin
+mutating. When it's done, it will write any new inputs it discovered
+back into the corpus. Re-using a corpus across executions is a good way
+to make LibFuzzer return more results in less time. Our Evergreen tasks
+will try to acquire and re-use a corpus from an earlier commit, if they
+can.
+
+# References
+
+- [LibFuzzer's official
+ documentation](https://llvm.org/docs/LibFuzzer.html)
diff --git a/docs/linting.md b/docs/linting.md
index a499505442d44..6149808671dfb 100644
--- a/docs/linting.md
+++ b/docs/linting.md
@@ -29,12 +29,12 @@ assertion codes are distinct. You can see the usage by running the following com
Ex: `buildscripts/errorcodes.py`
-### `quickcpplint.py`
-The `buildscripts/quickcpplint.py` script runs a simple MongoDB C++ linter. You can see the usage
-by running the following command: `buildscripts/quickcpplint.py --help`. You can take a look at
-`buildscripts/linter/simplecpplint.py` to better understand the rules for this linter.
+### `quickmongolint.py`
+The `buildscripts/quickmongolint.py` script runs a simple MongoDB C++ linter. You can see the usage
+by running the following command: `buildscripts/quickmongolint.py --help`. You can take a look at
+`buildscripts/linter/mongolint.py` to better understand the rules for this linter.
-Ex: `buildscripts/quickcpplint.py lint`
+Ex: `buildscripts/quickmongolint.py lint`
## Javascript Linters
The `buildscripts/eslint.py` wrapper script runs the `eslint` javascript linter. You can see the
@@ -87,11 +87,11 @@ Here are some examples:
| SCons Target | Linter(s) | Example |
| --- | --- | --- |
-| `lint` | `clang-format` `errorcodes.py` `quickcpplint.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint` |
+| `lint` | `clang-format` `errorcodes.py` `quickmongolint.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint` |
| `lint-fast` | `clang-format` `errorcodes.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint-fast` |
| `lint-clang-format` | `clang-format` | `buildscripts/scons.py lint-clang-format` |
| `lint-errorcodes` | `errorcodes.py` | `buildscripts/scons.py lint-errorcodes` |
-| `lint-lint.py` | `quickcpplint.py` | `buildscripts/scons.py lint-lint.py` |
+| `lint-lint.py` | `quickmongolint.py` | `buildscripts/scons.py lint-lint.py` |
| `lint-eslint` | `eslint` | `buildscripts/scons.py lint-eslint` |
| `lint-pylinters` | `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint-pylinters` |
| `lint-sconslinters` | `yapf` | `buildscripts/scons.py lint-sconslinters` |
diff --git a/docs/primary_only_service.md b/docs/primary_only_service.md
index 415d9da51006d..269af5671c0a6 100644
--- a/docs/primary_only_service.md
+++ b/docs/primary_only_service.md
@@ -103,4 +103,4 @@ responsible for deleting its state document, such logic needs to be careful as t
document is deleted, the corresponding PrimaryOnlyService is no longer keeping that Instance alive.
If an Instance has any additional logic or internal state to update after deleting its state
document, it must extend its own lifetime by capturing a shared_ptr to itself by calling
-shared_from_this() before deleting its state document.
\ No newline at end of file
+shared_from_this() before deleting its state document.
diff --git a/docs/testing/fsm_concurrency_testing_framework.md b/docs/testing/fsm_concurrency_testing_framework.md
index c8dd4b0d29007..3ff2ce8f5e0f6 100644
--- a/docs/testing/fsm_concurrency_testing_framework.md
+++ b/docs/testing/fsm_concurrency_testing_framework.md
@@ -116,29 +116,29 @@ $config = (function() {
function getRand() {
return Random.randInt(10);
}
-
+
function init(db, collName) {
this.start = getRand() * this.tid;
}
-
+
function scanGT(db, collName) {
db[collName].find({ _id: { $gt: this.start } }).itcount();
}
-
+
function scanLTE(db, collName) {
db[collName].find({ _id: { $lte: this.start } }).itcount();
}
-
-
+
+
return {
init: init,
scanGT: scanGT,
scanLTE: scanLTE
};
})();
-
+
/* ... */
-
+
return {
/* ... */
states: states,
@@ -204,7 +204,7 @@ $config = (function() {
printjson(db.serverCmdLineOpts());
});
}
-
+
function teardown(db, collName, cluster) {
cluster.executeOnMongodNodes(function(db) {
db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 128 });
@@ -288,7 +288,7 @@ engine, and work as you would expect. One thing to note is that before calling
either isMMAPv1 or isWiredTiger, first verify isMongod. When special casing
functionality for sharded environments or storage engines, try to special case a
test for the exceptionality while still leaving in place assertions for either
-case.
+case.
#### indexed_noindex.js
@@ -300,12 +300,11 @@ workload you are extending has a function in its data object called
"getIndexSpec" that returns the spec for the index to be removed.
```javascript
-
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-load('jstests/concurrency/fsm_workloads/workload_with_index.js'); //for $config
-
-$config = extendWorkload($config, indexedNoIndex);
+import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/workload_with_index.js';
+
+export const $config = extendWorkload($baseConfig, indexedNoIndex);
```
#### drop_utils.js
@@ -336,7 +335,7 @@ will always correspond to the mongod the mongo shell initially connected to.
Serial is the simplest of all three modes and basically works as explained
above. Setup is run single threaded, data is copied into multiple threads where
the states are executed, and once all the threads have finished a teardown
-function is run and the runner moves onto the next workload.
+function is run and the runner moves onto the next workload.
![fsm_serial_example.png](../images/testing/fsm_serial_example.png)
@@ -393,7 +392,7 @@ runWorkloads functions, the third argument, can contain the following options
Runs all workloads serially. For each workload, `$config.threadCount` threads
are spawned and each thread runs for exactly `$config.iterations` steps starting
at `$config.startState` and transitioning to other states based on the
-transition probabilities defined in $config.transitions.
+transition probabilities defined in $config.transitions.
#### fsm_all_composed.js
@@ -408,7 +407,7 @@ composition of workloads. By default, each workload in each subset is run
between 2 and 3 times. The number of threads used during composition equals the
sum of the `$config.threadCount` values for each workload in each subset.
-#### fsm_all_simultaneous.js
+#### fsm_all_simultaneous.js
options: numSubsets, subsetSize
@@ -533,4 +532,4 @@ and OwnColl assertions.
fewer than 20% of the threads fail while spawning we allow the non-failed
threads to continue with the test. The 20% threshold is somewhat arbitrary;
the goal is to abort if "mostly all" of the threads failed but to tolerate "a
- few" threads failing.
\ No newline at end of file
+ few" threads failing.
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index e0cb1e6b7b49e..58e005f2d58e4 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -4,19 +4,20 @@
#
# Usage:
# Add the server ticket number and the path to the test file for the test you intend to denylist
-# under the appropriate suite. Any test in a (ticket, test_file) pair that appears in this file but
+# under the appropriate multiversion branch. Any test in a (ticket, test_file) pair that appears in this file but
# not in the last-lts or last-continuous branch version of this file indicates that a commit has
# not yet been backported to the last-lts or last-continuous branch and will be excluded from the
# multiversion suite corresponding to the root level suite key.
#
-# Example: To prevent 'my_test_file.js' from running in the 'replica_sets_multiversion' suite with the last-continuous binary
-# replica_sets_multiversion:
-# - ticket: SERVER-1000
-# test_file: jstests/core/my_test_file.js
+# Example: To prevent 'my_test_file.js' from running with the last-continuous binary
+# last-continuous:
+# all:
+# - test_file: jstests/core/my_test_file.js
+# ticket: SERVER-1000
#
# The above example will denylist jstests/core/my_test_file.js from the
-# 'replica_sets_multiversion_gen' task until this file has been updated with the same
-# (ticket, test_file) pair on the last-lts branch.
+# last-continuous multiversion suites until this file has been updated with the same
+# (ticket, test_file) pair on the last-continuous branch.
#
last-continuous:
all:
@@ -200,6 +201,8 @@ last-continuous:
ticket: SERVER-65022
- test_file: jstests/sharding/database_versioning_all_commands.js
ticket: SERVER-65101
+ - test_file: jstests/sharding/database_versioning_all_commands.js
+ ticket: SERVER-75911
- test_file: jstests/sharding/sessions_collection_auto_healing.js
ticket: SERVER-65188
- test_file: jstests/replsets/sessions_collection_auto_healing.js
@@ -265,7 +268,7 @@ last-continuous:
- test_file: jstests/sharding/collection_uuid_shard_capped_collection.js
ticket: SERVER-67885
- test_file: jstests/sharding/prepare_transaction_then_migrate.js
- ticket: SERVER-68361
+ ticket: SERVER-71219
- test_file: jstests/core/txns/txn_ops_allowed_on_buckets_coll.js
ticket: SERVER-68556
- test_file: jstests/core/txns/no_writes_to_system_collections_in_txn.js
@@ -328,8 +331,6 @@ last-continuous:
ticket: SERVER-72224
- test_file: jstests/sharding/internal_txns/incomplete_transaction_history_during_migration.js
ticket: SERVER-73938
- - test_file: jstests/sharding/shard_keys_with_dollar_sign.js
- ticket: SERVER-74124
- test_file: jstests/core/query/partial_index_logical.js
ticket: SERVER-68434
- test_file: jstests/core/timeseries/timeseries_collmod.js
@@ -344,6 +345,8 @@ last-continuous:
ticket: SERVER-67105
- test_file: jstests/core/clustered/clustered_collection_bounded_scan.js
ticket: SERVER-67105
+ - test_file: jstests/noPassthrough/clustered_collection_sorted_scan.js
+ ticket: SERVER-76102
- test_file: src/mongo/db/modules/enterprise/jstests/fle2/collection_coll_stats.js
ticket: SERVER-74461
- test_file: src/mongo/db/modules/enterprise/jstests/fle2/top_command.js
@@ -362,9 +365,61 @@ last-continuous:
ticket: SERVER-75517
- test_file: jstests/replsets/startup_recovery_for_restore_needs_rollback.js
ticket: SERVER-67180
+ - test_file: jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js
+ ticket: SERVER-75886
+ - test_file: jstests/core/clustered/clustered_collection_hint.js
+ ticket: SERVER-73482
+ - test_file: jstests/core/command_let_variables.js
+ ticket: SERVER-75356
+ - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
+ ticket: SERVER-76311
+ - test_file: jstests/sharding/refine_collection_shard_key_basic.js
+ ticket: SERVER-76394
+ - test_file: jstests/sharding/shard_drain_works_with_chunks_of_any_size.js
+ ticket: SERVER-76550
+ - test_file: jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js
+ ticket: SERVER-76719
+ - test_file: jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js
+ ticket: SERVER-76908
+ - test_file: jstests/sharding/shard_keys_with_dollar_sign.js
+ ticket: SERVER-76948
+ - test_file: jstests/sharding/merge_let_params_size_estimation.js
+ ticket: SERVER-74806
+ - test_file: jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js
+ ticket: SERVER-77097
+ - test_file: jstests/sharding/move_primary_donor_cleaned_up_if_coordinator_steps_up_aborted.js
+ ticket: SERVER-76872
+ - test_file: jstests/sharding/resharding_update_tag_zones_large.js
+ ticket: SERVER-76988
+ - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js
+ ticket: SERVER-77247
+ - test_file: jstests/core/timeseries/timeseries_create_collection.js
+ ticket: SERVER-77382
+ - test_file: jstests/replsets/tenant_migration_retryable_internal_transaction.js
+ ticket: SERVER-77237
+ - test_file: jstests/sharding/cluster_time_across_add_shard.js
+ ticket: SERVER-60466
+ - test_file: jstests/sharding/move_chunk_deferred_lookup.js
+ ticket: SERVER-78050
+ - test_file: jstests/replsets/tenant_migrations_back_to_back_2.js
+ ticket: SERVER-78176
+ - test_file: jstests/sharding/transfer_mods_large_batches.js
+ ticket: SERVER-78414
+ - test_file: jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js
+ ticket: SERVER-74954
+ - test_file: jstests/core/find_with_resume_after_param.js
+ ticket: SERVER-77386
+ - test_file: jstests/core/timeseries/timeseries_resume_after.js
+ ticket: SERVER-77386
+ - test_file: jstests/sharding/analyze_shard_key/timeseries.js
+ ticket: SERVER-78595
+ - test_file: jstests/replsets/config_txns_reaping_interrupt.js
+ ticket: SERVER-78187
suites: null
last-lts:
all:
+ - test_file: jstests/sharding/database_versioning_all_commands.js
+ ticket: SERVER-75911
- test_file: jstests/core/query/null_query_semantics.js
ticket: SERVER-21929
- test_file: jstests/core/query/or/or_to_in.js
@@ -434,7 +489,7 @@ last-lts:
- test_file: jstests/core/txns/errors_on_committed_transaction.js
ticket: SERVER-52547
- test_file: jstests/sharding/prepare_transaction_then_migrate.js
- ticket: SERVER-52906
+ ticket: SERVER-71219
- test_file: jstests/sharding/migration_waits_for_majority_commit.js
ticket: SERVER-52906
- test_file: jstests/sharding/migration_ignore_interrupts_1.js
@@ -703,8 +758,6 @@ last-lts:
ticket: SERVER-67723
- test_file: jstests/sharding/collection_uuid_shard_capped_collection.js
ticket: SERVER-67885
- - test_file: jstests/sharding/prepare_transaction_then_migrate.js
- ticket: SERVER-68361
- test_file: jstests/core/txns/txn_ops_allowed_on_buckets_coll.js
ticket: SERVER-68556
- test_file: jstests/core/txns/no_writes_to_system_collections_in_txn.js
@@ -765,8 +818,6 @@ last-lts:
ticket: SERVER-72224
- test_file: jstests/sharding/internal_txns/incomplete_transaction_history_during_migration.js
ticket: SERVER-73938
- - test_file: jstests/sharding/shard_keys_with_dollar_sign.js
- ticket: SERVER-74124
- test_file: jstests/core/timeseries/timeseries_filter_extended_range.js
ticket: SERVER-69952
- test_file: jstests/replsets/log_ddl_ops.js
@@ -775,6 +826,8 @@ last-lts:
ticket: SERVER-67105
- test_file: jstests/core/clustered/clustered_collection_bounded_scan.js
ticket: SERVER-67105
+ - test_file: jstests/noPassthrough/clustered_collection_sorted_scan.js
+ ticket: SERVER-76102
- test_file: src/mongo/db/modules/enterprise/jstests/fle2/collection_coll_stats.js
ticket: SERVER-74461
- test_file: src/mongo/db/modules/enterprise/jstests/fle2/top_command.js
@@ -791,4 +844,78 @@ last-lts:
ticket: SERVER-75517
- test_file: jstests/replsets/startup_recovery_for_restore_needs_rollback.js
ticket: SERVER-67180
+ - test_file: jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js
+ ticket: SERVER-76012
+ - test_file: jstests/replsets/transactions_committed_with_tickets_exhausted.js
+ ticket: SERVER-76012
+ - test_file: jstests/replsets/transactions_reaped_with_tickets_exhausted.js
+ ticket: SERVER-76012
+ - test_file: jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js
+ ticket: SERVER-76012
+ - test_file: jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js
+ ticket: SERVER-76012
+ - test_file: jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js
+ ticket: SERVER-75886
+ - test_file: jstests/core/clustered/clustered_collection_hint.js
+ ticket: SERVER-73482
+ - test_file: jstests/core/command_let_variables.js
+ ticket: SERVER-75356
+ - test_file: jstests/sharding/invalid_shard_identity_doc.js
+ ticket: SERVER-76310
+ - test_file: jstests/sharding/auth_catalog_shard_localhost_exception.js
+ ticket: SERVER-76310
+ - test_file: jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js
+ ticket: SERVER-76310
+ - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
+ ticket: SERVER-76311
+ - test_file: jstests/sharding/refine_collection_shard_key_basic.js
+ ticket: SERVER-76394
+ - test_file: jstests/sharding/shard_drain_works_with_chunks_of_any_size.js
+ ticket: SERVER-76550
+ - test_file: jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js
+ ticket: SERVER-76719
+ - test_file: jstests/sharding/implicit_create_collection_triggered_by_DDLs.js
+ ticket: SERVER-76489
+ - test_file: jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js
+ ticket: SERVER-76908
+ - test_file: jstests/sharding/shard_keys_with_dollar_sign.js
+ ticket: SERVER-76948
+ - test_file: jstests/sharding/merge_let_params_size_estimation.js
+ ticket: SERVER-74806
+ - test_file: jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js
+ ticket: SERVER-77097
+ - test_file: jstests/sharding/move_primary_donor_cleaned_up_if_coordinator_steps_up_aborted.js
+ ticket: SERVER-76872
+ - test_file: jstests/sharding/resharding_update_tag_zones_large.js
+ ticket: SERVER-76988
+ - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js
+ ticket: SERVER-77247
+ - test_file: jstests/core/timeseries/timeseries_create_collection.js
+ ticket: SERVER-77382
+ - test_file: jstests/replsets/tenant_migration_retryable_internal_transaction.js
+ ticket: SERVER-77237
+ - test_file: jstests/sharding/cluster_time_across_add_shard.js
+ ticket: SERVER-60466
+ - test_file: jstests/sharding/move_chunk_deferred_lookup.js
+ ticket: SERVER-78050
+ - test_file: jstests/core/find_with_resume_after_param.js
+ ticket: SERVER-77386
+ - test_file: jstests/core/timeseries/timeseries_resume_after.js
+ ticket: SERVER-77386
+ - test_file: jstests/replsets/tenant_migrations_back_to_back_2.js
+ ticket: SERVER-78176
+ - test_file: jstests/core/index/wildcard/compound_wildcard_index_or.js
+ ticket: SERVER-78307
+ - test_file: jstests/core/index/wildcard/compound_wildcard_index_unbounded.js
+ ticket: SERVER-78307
+ - test_file: jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
+ ticket: SERVER-78307
+ - test_file: jstests/sharding/transfer_mods_large_batches.js
+ ticket: SERVER-78414
+ - test_file: jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js
+ ticket: SERVER-74954
+ - test_file: jstests/sharding/analyze_shard_key/timeseries.js
+ ticket: SERVER-78595
+ - test_file: jstests/replsets/config_txns_reaping_interrupt.js
+ ticket: SERVER-78187
suites: null
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 28313f81151b3..45c69e99f8f6b 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -53,27 +53,22 @@
# - func: "set up venv"
include:
-- filename: etc/evergreen_yml_components/project_and_distro_settings.yml
- filename: etc/evergreen_yml_components/definitions.yml
- filename: etc/evergreen_yml_components/variants/task_generation.yml
- filename: etc/evergreen_yml_components/variants/sanitizer.yml
- filename: etc/evergreen_yml_components/variants/in_memory.yml
- filename: etc/evergreen_yml_components/variants/ninja.yml
- filename: etc/evergreen_yml_components/variants/compile_static_analysis.yml
+- filename: etc/evergreen_yml_components/variants/config_shard.yml
variables:
-- &libfuzzertests
- name: libfuzzertests!
- execution_tasks:
- - compile_and_archive_libfuzzertests
- - fetch_and_run_libfuzzertests
-
# Common compile variant dependency specifications.
+# THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
- &linux_x86_dynamic_compile_variant_dependency
depends_on:
- name: archive_dist_test_debug
- variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile-required
+ variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile
- name: version_gen
variant: generate-tasks-for-version
# This is added because of EVG-18211.
@@ -89,8 +84,6 @@ variables:
# This is added because of EVG-18211.
# Without this we are adding extra dependencies on evergreen and it is causing strain
omit_generated_tasks: true
- - name: version_burn_in_gen
- variant: generate-tasks-for-version
- &linux_x86_dynamic_debug_compile_variant_dependency
depends_on:
@@ -121,9 +114,8 @@ variables:
# This is added because of EVG-18211.
# Without this we are adding extra dependencies on evergreen and it is causing strain
omit_generated_tasks: true
- - name: version_burn_in_gen
- variant: generate-tasks-for-version
+# THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
- &linux_x86_generic_expansions
multiversion_platform: rhel80
multiversion_edition: enterprise
@@ -145,7 +137,7 @@ variables:
# If you add anything to san_options, make sure the appropriate changes are
# also made to SConstruct.
-# and also to the san_options in compile_static_analysis.yml
+# and also to the san_options in compile_static_analysis.yml and sanitizer.yml
- aubsan_options: &aubsan_options
>-
UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
@@ -190,6 +182,11 @@ variables:
distros:
- windows-vsCurrent-large
- name: burn_in_tests_gen
+ depends_on:
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ omit_generated_tasks: true
+ - name: archive_dist_test_debug
- name: .aggfuzzer .common !.feature_flag_guarded
- name: audit
- name: auth_audit_gen
@@ -203,7 +200,6 @@ variables:
- windows-2016-dc
- name: .jscore .common !.sharding
- name: .jstestfuzz .common
- - name: .logical_session_cache
- name: replica_sets_auth_gen
- name: sasl
- name: sharding_auth_audit_gen
@@ -263,7 +259,6 @@ variables:
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .ocsp
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -280,8 +275,8 @@ variables:
- name: test_packages
distros:
- ubuntu2004-package
- # TODO: BF-24515 re-enable when build failure cause determined and resolved
- # - name: selinux_rhel7_enterprise
+ - name: vector_search
+ - name: selinux_rhel7_enterprise
- name: generate_buildid_to_debug_symbols_mapping
@@ -390,120 +385,6 @@ buildvariants:
tasks:
- name: tla_plus
-- &enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: &enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental
- display_name: "~ Shared Library Enterprise RHEL 8.0 Toolchain GCC C++20 DEBUG"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- modules:
- - enterprise
- run_on:
- - rhel80-small
- expansions: &enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- additional_package_targets: >-
- archive-mongocryptd
- archive-mongocryptd-debug
- archive-mh
- archive-mh-debug
- compile_flags: >-
- --dbg=on
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --link-model=dynamic
- --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- --variables-files=etc/scons/scons_experimental_scheduler.vars
- --cxx-std=20
- has_packages: false
- scons_cache_scope: shared
- scons_cache_mode: all
- large_distro_name: rhel80-medium
- num_scons_link_jobs_available: 0.99
- compile_variant: *enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental
- tasks: &enterprise-rhel80-dynamic-gcc-debug-experimental-tasks
- - name: compile_test_and_package_parallel_core_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_test_and_package_parallel_unittest_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_test_and_package_parallel_dbtest_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_integration_and_test_parallel_stream_TG
- distros:
- - rhel80-large
- - name: test_api_version_compatibility
- - name: .aggfuzzer !.multiversion !.feature_flag_guarded
- - name: .aggregation !.multiversion !.feature_flag_guarded
- - name: audit
- - name: .auth !.multiversion
- - name: .causally_consistent !.sharding
- - name: .change_streams !.multiversion
- - name: .misc_js !.multiversion
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.multiversion
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.multiversion
- distros:
- - rhel80-xlarge
- - name: disk_wiredtiger
- - name: .encrypt !.multiversion
- - name: idl_tests
- - name: jsCore
- distros:
- - rhel80-xlarge
- - name: .jscore .common !jsCore !.multiversion
- - name: jsCore_min_batch_repeat_queries_ese_gsm
- - name: jsCore_txns_large_txns_format
- - name: json_schema
- - name: libunwind_tests
- - name: mqlrun
- - name: .multi_shard !.multiversion
- - name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- - name: .read_write_concern .large !.multiversion
- distros:
- - rhel80-xlarge
- - name: .read_write_concern !.large !.multiversion
- - name: .replica_sets !.encrypt !.auth !.multiversion
- distros:
- - rhel80-xlarge
- - name: replica_sets_api_version_jscore_passthrough_gen
- - name: replica_sets_reconfig_jscore_passthrough_gen
- - name: retryable_writes_jscore_passthrough_gen
- - name: .read_only !.multiversion
- - name: sasl
- - name: search
- - name: search_auth
- - name: search_pinned_connections_auth
- - name: search_ssl
- - name: session_jscore_passthrough
- - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion
- - name: sharding_api_version_jscore_passthrough_gen
- - name: .sharding .txns !.multiversion
- - name: .sharding .common !.multiversion !.jstestfuzz
- - name: .stitch
- - name: secondary_reads_passthrough_gen
- - name: server_discovery_and_monitoring_json_test_TG
- - name: .serverless !.multiversion
- distros:
- - rhel80-xlarge
- - name: server_selection_json_test_TG
- distros:
- - rhel80-xlarge
- - name: generate_buildid_to_debug_symbols_mapping
-
-- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: &enterprise-rhel80-dynamic-clang-cxx20-debug-experimental enterprise-rhel80-dynamic-clang-cxx20-debug-experimental
- display_name: "~ Shared Library Enterprise RHEL 8.0 Toolchain Clang C++20 DEBUG"
- expansions:
- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- compile_flags: >-
- --dbg=on
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --link-model=dynamic
- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
- --variables-files=etc/scons/scons_experimental_scheduler.vars
- --cxx-std=20
- compile_variant: *enterprise-rhel80-dynamic-clang-cxx20-debug-experimental
-
- name: &enterprise-rhel-80-64-bit-coverage enterprise-rhel-80-64-bit-coverage
display_name: "~ Enterprise RHEL 8.0 DEBUG Code Coverage"
modules:
@@ -603,140 +484,6 @@ buildvariants:
tasks:
*enterprise-rhel-80-64-bit-coverage-tasks
-- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: enterprise-rhel80-dynamic-gcc-cxx20-debug-pm-1328-experimental
- display_name: "~ Shared Library Enterprise RHEL 8.0 GCC C++20 DEBUG + PM-1328"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
- expansions:
- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- compile_flags: >-
- --dbg=on
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --link-model=dynamic
- --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- --cxx-std=20
- --experimental-optimization=*
- --experimental-runtime-hardening=*
- --disable-warnings-as-errors
-
-- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: &enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental
- display_name: "~ Shared Library Enterprise RHEL 8.0 Clang C++20 DEBUG + PM-1328"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
- expansions:
- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- compile_flags: >-
- --dbg=on
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --link-model=dynamic
- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
- --cxx-std=20
- --experimental-optimization=*
- --experimental-runtime-hardening=*
- compile_variant: *enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental
-
-- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: &enterprise-rhel80-gcc-cxx20-pm-1328-experimental enterprise-rhel80-gcc-cxx20-pm-1328-experimental
- display_name: "~ Enterprise RHEL 8.0 Toolchain GCC C++20 + PM-1328"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
- expansions:
- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- compile_flags: >-
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- --cxx-std=20
- --experimental-optimization=*
- --experimental-runtime-hardening=*
- --disable-warnings-as-errors
- compile_variant: *enterprise-rhel80-gcc-cxx20-pm-1328-experimental
- tasks: &enterprise-rhel80-dynamic-gcc-debug-experimental-tasks-no-unittests
- - name: compile_test_and_package_parallel_core_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_test_and_package_parallel_dbtest_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_integration_and_test_parallel_stream_TG
- distros:
- - rhel80-large
- - name: test_api_version_compatibility
- - name: .aggfuzzer !.multiversion !.feature_flag_guarded
- - name: .aggregation !.multiversion !.feature_flag_guarded
- - name: audit
- - name: .auth !.multiversion
- - name: .causally_consistent !.sharding
- - name: .change_streams !.multiversion
- - name: .misc_js !.multiversion
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.multiversion
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.multiversion
- distros:
- - rhel80-xlarge
- - name: disk_wiredtiger
- - name: .encrypt !.multiversion
- - name: idl_tests
- - name: jsCore
- distros:
- - rhel80-xlarge
- - name: .jscore .common !jsCore !.multiversion
- - name: jsCore_min_batch_repeat_queries_ese_gsm
- - name: jsCore_txns_large_txns_format
- - name: json_schema
- - name: libunwind_tests
- - name: mqlrun
- - name: .multi_shard !.multiversion
- - name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- - name: .read_write_concern .large !.multiversion
- distros:
- - rhel80-xlarge
- - name: .read_write_concern !.large !.multiversion
- - name: .replica_sets !.encrypt !.auth !.multiversion
- distros:
- - rhel80-xlarge
- - name: replica_sets_api_version_jscore_passthrough_gen
- - name: replica_sets_reconfig_jscore_passthrough_gen
- - name: retryable_writes_jscore_passthrough_gen
- - name: .read_only !.multiversion
- - name: sasl
- - name: search
- - name: search_auth
- - name: search_pinned_connections_auth
- - name: search_ssl
- - name: session_jscore_passthrough
- - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion
- - name: sharding_api_version_jscore_passthrough_gen
- - name: .sharding .txns !.multiversion
- - name: .sharding .common !.multiversion
- - name: .stitch
- - name: secondary_reads_passthrough_gen
- - name: server_discovery_and_monitoring_json_test_TG
- - name: .serverless !.multiversion
- distros:
- - rhel80-xlarge
- - name: server_selection_json_test_TG
- distros:
- - rhel80-xlarge
- - name: generate_buildid_to_debug_symbols_mapping
-
-
-- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template
- name: &enterprise-rhel80-clang-cxx20-pm-1328-experimental enterprise-rhel80-clang-cxx20-pm-1328-experimental
- display_name: "~ Enterprise RHEL 8.0 Toolchain Clang C++20 + PM-1328"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
- expansions:
- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions
- compile_flags: >-
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
- --cxx-std=20
- --experimental-optimization=*
- --experimental-runtime-hardening=*
- compile_variant: *enterprise-rhel80-clang-cxx20-pm-1328-experimental
- tasks: *enterprise-rhel80-dynamic-gcc-debug-experimental-tasks-no-unittests
-
- name: &stm-daily-cron stm-daily-cron
modules:
- enterprise
@@ -765,11 +512,20 @@ buildvariants:
tasks:
- name: blackduck_scanner
-- name: tooling-metrics
- display_name: "* Tooling Metrics"
+- name: tooling-metrics-x86
+ display_name: "* Tooling Metrics x86"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+ run_on:
+ - ubuntu2204-small
+ stepback: false
+ tasks:
+ - name: tooling_metrics_test
+
+- name: tooling-metrics-arm64
+ display_name: "* Tooling Metrics ARM64"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
run_on:
- - ubuntu1804-small
+ - ubuntu2204-arm64-small
stepback: false
tasks:
- name: tooling_metrics_test
@@ -818,12 +574,19 @@ buildvariants:
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .misc_js !.non_win_dbg
- - name: .concurrency .debug_only
+ - name: .concurrency !.ubsan
+ distros:
+ - windows-vsCurrent-large
+ - name: .config_fuzzer !.large !.linux_only
+ - name: .config_fuzzer .large !.linux_only !.sharded
distros:
- windows-vsCurrent-large
- name: disk_wiredtiger
- name: free_monitoring
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: .jscore .common !.auth !.sharding
- name: jsCore_txns_large_txns_format
- name: json_schema
@@ -881,7 +644,6 @@ buildvariants:
test_flags: *windows_common_test_excludes
external_auth_jobs_max: 1
tasks:
- - name: burn_in_tests_gen
- name: audit
- name: auth_audit_gen
- name: causally_consistent_jscore_txns_passthrough
@@ -942,12 +704,15 @@ buildvariants:
tasks:
- name: cqf
- name: cqf_disabled_pipeline_opt
- - name: cqf_passthrough
- name: cqf_parallel
- name: query_golden_classic
- - name: query_golden_cqf
- - name: burn_in_tests_gen
# - name: burn_in_tasks_gen
+ # depends_on:
+ # - name: version_burn_in_gen
+ # variant: generate-tasks-for-version
+ # omit_generated_tasks: true
+ # - name: archive_dist_test_debug
+ # variant: *windows_compile_variant_name
- name: audit
- name: auth_audit_gen
- name: buildscripts_test
@@ -977,77 +742,10 @@ buildvariants:
- name: .sharding .txns
- name: sharding_auth_audit_gen
- name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gen
- - name: telemetry_passthrough
+ - name: query_stats_passthrough
+ - name: query_stats_passthrough_writeonly
- name: unittest_shell_hang_analyzer_gen
-- name: &enterprise-windows-cxx20-debug-experimental enterprise-windows-cxx20-debug-experimental
- display_name: "~ Enterprise Windows C++20 DEBUG"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- modules:
- - enterprise
- run_on:
- - windows-vsCurrent-small
- expansions:
- exe: ".exe"
- compile_variant: *enterprise-windows-cxx20-debug-experimental
- additional_package_targets: >-
- archive-mongocryptd
- archive-mongocryptd-debug
- msi
- archive-mh
- archive-mh-debug
- content_type: application/zip
- compile_flags: >-
- --dbg=on
- --opt=on
- --ssl
- MONGO_DISTMOD=windows
- CPPPATH="c:/sasl/include"
- LIBPATH="c:/sasl/lib"
- -j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5")
- --win-version-min=win10
- --cxx-std=20
- num_scons_link_jobs_available: 0.25
- python: '/cygdrive/c/python/python37/python.exe'
- ext: zip
- scons_cache_scope: shared
- multiversion_platform: windows
- multiversion_edition: enterprise
- jstestfuzz_num_generated_files: 35
- target_resmoke_time: 20
- max_sub_suites: 3
- large_distro_name: windows-vsCurrent-large
- test_flags: *windows_common_test_excludes
- exec_timeout_secs: 14400 # 3 hour timeout
- tasks:
- - name: compile_test_and_package_serial_TG
- distros:
- - windows-vsCurrent-large
- - name: .aggfuzzer !.feature_flag_guarded
- - name: .aggregation !.auth !.encrypt !.unwind !.feature_flag_guarded
- - name: auth_gen
- - name: causally_consistent_jscore_txns_passthrough
- - name: .misc_js
- # Some concurrency workloads require a lot of memory, so we use machines
- # with more RAM for these suites.
- - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only
- distros:
- - windows-vsCurrent-large
- - name: .concurrency .common
- - name: disk_wiredtiger
- - name: .jscore .common !.auth
- - name: json_schema
- - name: .query_fuzzer
- - name: .read_write_concern
- - name: replica_sets_gen
- - name: replica_sets_jscore_passthrough_gen
- - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion
- - name: .sharding .txns !.multiversion
- - name: .sharding .common !.csrs !.multiversion !.gcm
- - name: .ssl
- - name: .stitch
- - name: .updatefuzzer !.multiversion
-
- name: &enterprise-windows-debug-unoptimized enterprise-windows-debug-unoptimized
display_name: "Enterprise Windows DEBUG (Unoptimized)"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
@@ -1128,8 +826,6 @@ buildvariants:
- name: replica_sets_max_mirroring_large_txns_format_gen
- name: .ssl
- name: .stitch
- - name: unittest_shell_hang_analyzer_gen
- - name: generate_buildid_to_debug_symbols_mapping
- name: &enterprise-macos-rosetta-2 enterprise-macos-rosetta-2
display_name: "Enterprise macOS Via Rosetta 2"
@@ -1156,24 +852,22 @@ buildvariants:
- name: causally_consistent_jscore_txns_passthrough
- name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl
- name: .jscore .common !.decimal !.sharding
- - name: .logical_session_cache .one_sec
- name: mqlrun
# TODO(SERVER-64009): Re-enable replica_sets_auth_gen.
# - name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough_gen
- name: sasl
- name: .crypt
- - name: generate_buildid_to_debug_symbols_mapping
-- name: &enterprise-macos-cxx20 enterprise-macos-cxx20
- display_name: "Enterprise macOS C++20 DEBUG"
+- name: &enterprise-macos enterprise-macos
+ display_name: "Enterprise macOS DEBUG"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
run_on:
- macos-1100
expansions:
- compile_variant: *enterprise-macos-cxx20
+ compile_variant: *enterprise-macos
test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm
compile_env: DEVELOPER_DIR=/Applications/Xcode13.app
compile_flags: >-
@@ -1183,7 +877,6 @@ buildvariants:
-j$(sysctl -n hw.logicalcpu)
--libc++
--variables-files=etc/scons/xcode_macosx.vars
- --cxx-std=20
resmoke_jobs_max: 6
num_scons_link_jobs_available: 0.99
tasks:
@@ -1191,16 +884,13 @@ buildvariants:
- name: audit
- name: auth_audit_gen
- name: causally_consistent_jscore_txns_passthrough
- # TODO: SERVER-66945 Re-enable ESE on enterprise macos
- # - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl
+ - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl
- name: .jscore .common !.decimal !.sharding
- - name: .logical_session_cache .one_sec
- name: mqlrun
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough_gen
- name: sasl
- name: .crypt
- - name: generate_buildid_to_debug_symbols_mapping
- name: &enterprise-macos-arm64 enterprise-macos-arm64
display_name: "~ Enterprise macOS arm64"
@@ -1226,18 +916,47 @@ buildvariants:
- name: causally_consistent_jscore_txns_passthrough
- name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore
- name: .jscore .common !.decimal !.sharding
- - name: .logical_session_cache .one_sec
- name: mqlrun
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: sasl
- name: .crypt
- - name: generate_buildid_to_debug_symbols_mapping
###########################################
# Redhat buildvariants #
###########################################
+- name: run-all-affected-jstests
+ display_name: "! Run All Affected JStests"
+ patch_only: true
+ run_on:
+ - rhel80-medium
+ expansions:
+ large_distro_name: rhel80-large
+ burn_in_tag_include_all_required_and_suggested: true
+ burn_in_tag_exclude_build_variants: >-
+ macos-debug-suggested
+ burn_in_tag_include_build_variants: >-
+ enterprise-rhel-80-64-bit-inmem
+ enterprise-rhel-80-64-bit-multiversion
+ burn_in_tag_compile_task_dependency: archive_dist_test_debug
+ compile_variant: *amazon_linux2_arm64_compile_variant_name
+ depends_on:
+ - name: archive_dist_test_debug
+ variant: *amazon_linux2_arm64_compile_variant_name
+ - name: version_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this we are adding extra dependencies on evergreen and it is causing strain
+ omit_generated_tasks: true
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this we are adding extra dependencies on evergreen and it is causing strain
+ omit_generated_tasks: true
+ tasks:
+ - name: burn_in_tags_gen
+
- &enterprise-rhel-80-64-bit-dynamic-template
<<: *linux_x86_dynamic_compile_variant_dependency
name: &enterprise-rhel-80-64-bit-dynamic enterprise-rhel-80-64-bit-dynamic
@@ -1247,6 +966,7 @@ buildvariants:
- enterprise
run_on:
- rhel80-small
+ # THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
expansions: &enterprise-rhel-80-64-bit-dynamic-expansions
<<: *linux_x86_generic_expansions
scons_cache_scope: shared
@@ -1259,10 +979,6 @@ buildvariants:
idle_timeout_factor: 1.5
exec_timeout_factor: 1.5
large_distro_name: rhel80-medium
- burn_in_tag_buildvariants: >-
- enterprise-rhel-80-64-bit-inmem
- enterprise-rhel-80-64-bit-multiversion
- burn_in_tag_compile_task_dependency: archive_dist_test_debug
depends_on:
- name: archive_dist_test_debug
variant: *linux_x86_dynamic_compile_variant_name
@@ -1271,10 +987,7 @@ buildvariants:
# This is added because of EVG-18211.
# Without this we are adding extra dependencies on evergreen and it is causing strain
omit_generated_tasks: true
- - name: version_burn_in_gen
- variant: generate-tasks-for-version
tasks:
- - name: burn_in_tests_gen
- name: .aggfuzzer !.feature_flag_guarded
- name: .aggregation !.feature_flag_guarded
- name: aggregation_repeat_queries
@@ -1286,8 +999,8 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- rhel80-medium
- name: .config_fuzzer !.large
@@ -1298,6 +1011,9 @@ buildvariants:
- name: .encrypt
- name: idl_tests
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: jsCore
distros:
- rhel80-xlarge
@@ -1340,6 +1056,7 @@ buildvariants:
- name: search_pinned_connections_auth
- name: search_ssl
- name: session_jscore_passthrough
+ - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: sharding_api_version_jscore_passthrough_gen
- name: sharding_api_strict_passthrough_gen
@@ -1350,6 +1067,7 @@ buildvariants:
- name: .serverless
distros:
- rhel80-xlarge
+ - name: vector_search
- <<: *enterprise-rhel-80-64-bit-dynamic-template
name: &enterprise-rhel-80-64-bit-dynamic-debug-mode enterprise-rhel-80-64-bit-dynamic-debug-mode
@@ -1393,8 +1111,8 @@ buildvariants:
- name: .change_streams !.no_debug_mode
- name: .change_stream_fuzzer
- name: .misc_js !.no_debug_mode
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.no_debug_mode
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.no_debug_mode
+ - name: .concurrency !.large !.ubsan !.no_txns !.no_debug_mode
+ - name: .concurrency .large !.ubsan !.no_txns !.no_debug_mode
distros:
- rhel80-medium
- name: .config_fuzzer !.large
@@ -1408,6 +1126,10 @@ buildvariants:
- name: .encrypt !.no_debug_mode
- name: idl_tests
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ # TODO (SERVER-78417) reenable after ticket is complete
+ #- name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ #- name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: jsCore
distros:
- rhel80-xlarge
@@ -1466,12 +1188,13 @@ buildvariants:
distros:
- rhel80-xlarge
- name: streams
+ - name: vector_search
- name: generate_buildid_to_debug_symbols_mapping
-- &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template
+- &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template
<<: *linux_x86_dynamic_compile_variant_dependency
- name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required
- display_name: "! Shared Library Enterprise RHEL 8.0 (all feature flags)"
+ name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags enterprise-rhel-80-64-bit-dynamic-all-feature-flags
+ display_name: "* Shared Library Enterprise RHEL 8.0 (all feature flags)"
cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter
modules:
- enterprise
@@ -1497,21 +1220,21 @@ buildvariants:
# This is added because of EVG-18211.
# Without this we are adding extra dependencies on evergreen and it is causing strain
omit_generated_tasks: true
- - name: version_burn_in_gen
- variant: generate-tasks-for-version
tasks:
- name: analyze_shard_key_jscore_passthrough_gen
- name: cqf
- name: cqf_disabled_pipeline_opt
- - name: cqf_passthrough
- name: cqf_parallel
- name: query_golden_classic
- - name: query_golden_cqf
- name: lint_fuzzer_sanity_patch
- name: test_api_version_compatibility
- - name: burn_in_tests_gen
- - name: burn_in_tags_gen
# - name: burn_in_tasks_gen
+ # depends_on:
+ # - name: version_burn_in_gen
+ # variant: generate-tasks-for-version
+ # omit_generated_tasks: true
+ # - name: archive_dist_test_debug
+ # variant: *linux_x86_dynamic_compile_variant_name
- name: check_feature_flag_tags
- name: check_for_todos
- name: .aggfuzzer
@@ -1525,21 +1248,30 @@ buildvariants:
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .change_stream_fuzzer
+ # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites.
- name: change_streams_multitenant_passthrough
- name: change_streams_multitenant_sharded_collections_passthrough
- name: .misc_js
- name: .clustered_collections
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- rhel80-medium
- - name: sharding_auth_catalog_shard_gen
- name: .config_fuzzer !.large
+ - name: .config_fuzzer .large
+ distros:
+ - rhel80-medium
+ - name: .config_fuzzer_stress
+ distros:
+ - rhel80-large
- name: disk_wiredtiger
- name: .encrypt
- name: feature_flag_multiversion_gen
- name: idl_tests
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: .jscore .common
- name: jsCore_column_store_indexes
- name: jsCore_min_batch_repeat_queries_ese_gsm
@@ -1588,6 +1320,7 @@ buildvariants:
- name: sharding_api_strict_passthrough_gen
- name: .sharding .txns
- name: .sharding .common
+ - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen
- name: sharded_multi_stmt_txn_jscore_passthrough
- name: .serverless
- name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen
@@ -1596,7 +1329,9 @@ buildvariants:
- name: .shard_split
- name: .shard_merge
- name: streams
- - name: telemetry_passthrough
+ - name: vector_search
+ - name: query_stats_passthrough
+ - name: query_stats_passthrough_writeonly
- &enterprise-rhel-80-64-bit-dynamic-classic-engine
<<: *linux_x86_dynamic_compile_variant_dependency
@@ -1617,10 +1352,6 @@ buildvariants:
test_flags: >-
--mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
large_distro_name: rhel80-medium
- burn_in_tag_buildvariants: >-
- enterprise-rhel-80-64-bit-inmem
- enterprise-rhel-80-64-bit-multiversion
- burn_in_tag_compile_task_dependency: archive_dist_test_debug
depends_on:
- name: archive_dist_test_debug
variant: *linux_x86_dynamic_compile_variant_name
@@ -1629,8 +1360,6 @@ buildvariants:
# This is added because of EVG-18211.
# Without this we are adding extra dependencies on evergreen and it is causing strain
omit_generated_tasks: true
- - name: version_burn_in_gen
- variant: generate-tasks-for-version
tasks:
- name: .aggfuzzer !.sbe_only
- name: .aggregation !.sbe_only
@@ -1638,8 +1367,8 @@ buildvariants:
- name: .causally_consistent !.sharding
- name: .change_stream_fuzzer
- name: .change_streams
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.compute_mode
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.compute_mode
+ - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode
+ - name: .concurrency .large !.ubsan !.no_txns !.compute_mode
distros:
- rhel80-medium
- name: .encrypt
@@ -1648,6 +1377,7 @@ buildvariants:
- name: .misc_js
- name: .multi_shard
- name: .query_fuzzer
+ - name: query_golden_classic
- name: .random_multiversion_ds
- name: .read_only
- name: .read_write_concern !.large
@@ -1667,19 +1397,26 @@ buildvariants:
- name: .updatefuzzer
- name: aggregation_repeat_queries
- name: audit
- - name: burn_in_tags_gen
- name: burn_in_tests_gen
+ depends_on:
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ omit_generated_tasks: true
+ - name: archive_dist_test_debug
+ variant: *linux_x86_dynamic_compile_variant_name
- name: check_feature_flag_tags
- name: check_for_todos
- name: disk_wiredtiger
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: jsCore
distros:
- rhel80-xlarge
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: json_schema
- - name: lint_fuzzer_sanity_patch
- name: mqlrun
- name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- name: multiversion_gen
@@ -1704,26 +1441,154 @@ buildvariants:
- name: sharding_api_version_jscore_passthrough_gen
- name: test_api_version_compatibility
- name: unittest_shell_hang_analyzer_gen
+ - name: vector_search
-- <<: *linux_x86_dynamic_compile_variant_dependency
- name: &enterprise-rhel-80-64-bit-large-txns-format enterprise-rhel-80-64-bit-large-txns-format
- display_name: "Enterprise RHEL 8.0 (large transactions format)"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
- modules:
- - enterprise
- run_on:
- - rhel80-small
+# The CQF feature flag is currently part of the always-disabled feature flags list, so it is not
+# enabled in all-feature-flags variants besides this one. This variant allows us to get some initial
+# coverage for CQF without disrupting coverage for other feature flags (in particular, SBE). Once
+# CQF is enabled by default, it will be tested in the release variants and all-feature-flags
+# variants, and we will no longer need this dedicated variant to test it. At that point, we will
+# replace this variant with a dedicated variant for SBE stage builders, similar in spirit to the
+# Classic Engine variant above.
+# TODO SERVER-71163: Replace this variant with a dedicated variant for stage builders once the CQF
+# feature flag is not always-disabled.
+- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template
+ name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-and-cqf-enabled
+ display_name: "Shared Library Enterprise RHEL 8.0 Query (all feature flags and CQF enabled)"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
expansions:
- <<: *linux_x86_generic_expansions
+ <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-expansions
test_flags: >-
- --mongodSetParameters="{maxNumberOfTransactionOperationsInSingleOplogEntry: 2}"
- --excludeWithAnyTags=exclude_from_large_txns
+ --additionalFeatureFlagsFile all_feature_flags.txt
+ --excludeWithAnyTags=incompatible_with_shard_merge
+ --excludeWithAnyTags=cqf_incompatible
+ --mongosSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}"
+ --mongodSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}"
tasks:
- - name: auth_gen
- - name: auth_audit_gen
- - name: causally_consistent_jscore_txns_passthrough
- - name: change_streams
- - name: change_streams_whole_db_passthrough
+ - name: analyze_shard_key_jscore_passthrough_gen
+ - name: .cqf
+ - name: lint_fuzzer_sanity_patch
+ - name: test_api_version_compatibility
+ # - name: burn_in_tasks_gen
+ # depends_on:
+ # - name: version_burn_in_gen
+ # variant: generate-tasks-for-version
+ # omit_generated_tasks: true
+ # - name: archive_dist_test_debug
+ # variant: *linux_x86_dynamic_compile_variant_name
+ - name: check_feature_flag_tags
+ - name: check_for_todos
+ - name: .aggfuzzer
+ - name: .aggregation
+ - name: aggregation_repeat_queries
+ - name: audit
+ - name: .auth
+ - name: buildscripts_test
+ - name: resmoke_end2end_tests
+ - name: unittest_shell_hang_analyzer_gen
+ - name: .causally_consistent !.sharding
+ - name: .change_streams
+ - name: .change_stream_fuzzer
+ # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites.
+ - name: change_streams_multitenant_passthrough
+ - name: change_streams_multitenant_sharded_collections_passthrough
+ - name: .misc_js
+ - name: .clustered_collections
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
+ distros:
+ - rhel80-medium
+ - name: .config_fuzzer !.large
+ - name: .config_fuzzer .large
+ distros:
+ - rhel80-medium
+ - name: .config_fuzzer_stress
+ distros:
+ - rhel80-large
+ - name: disk_wiredtiger
+ - name: .encrypt
+ - name: feature_flag_multiversion_gen
+ - name: idl_tests
+ - name: initial_sync_fuzzer_gen
+ - name: .jscore .common
+ - name: jsCore_column_store_indexes
+ - name: jsCore_min_batch_repeat_queries_ese_gsm
+ - name: jsCore_txns_large_txns_format
+ - name: jsCore_wildcard_indexes
+ - name: json_schema
+ - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer.
+ - name: libunwind_tests
+ - name: .multiversion_sanity_check
+ - name: mqlrun
+ - name: .multi_shard
+ - name: multi_stmt_txn_jscore_passthrough_with_migration_gen
+ - name: multiversion_gen
+ - name: powercycle_smoke
+ - name: .query_fuzzer
+ - name: .random_multiversion_ds
+ - name: .read_write_concern .large
+ distros:
+ - rhel80-medium
+ - name: .read_write_concern !.large
+ - name: .replica_sets !.encrypt !.auth
+ distros:
+ - rhel80-medium
+ - name: replica_sets_api_version_jscore_passthrough_gen
+ - name: replica_sets_reconfig_jscore_passthrough_gen
+ - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen
+ distros:
+ - rhel80-medium
+ - name: replica_sets_reconfig_kill_primary_jscore_passthrough_gen
+ distros:
+ - rhel80-medium
+ - name: retryable_writes_jscore_passthrough_gen
+ - name: retryable_writes_jscore_stepdown_passthrough_gen
+ distros:
+ - rhel80-medium
+ - name: .read_only
+ - name: .rollbackfuzzer
+ - name: sasl
+ - name: search
+ - name: search_auth
+ - name: search_pinned_connections_auth
+ - name: search_ssl
+ - name: session_jscore_passthrough
+ - name: .sharding .jscore !.wo_snapshot !.multi_stmt
+ - name: sharding_api_version_jscore_passthrough_gen
+ - name: sharding_api_strict_passthrough_gen
+ - name: .sharding .txns
+ - name: .sharding .common
+ - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen
+ - name: sharded_multi_stmt_txn_jscore_passthrough
+ - name: .serverless
+ - name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen
+ - name: .updatefuzzer
+ - name: secondary_reads_passthrough_gen
+ - name: .shard_split
+ - name: .shard_merge
+ - name: streams
+ - name: query_stats_passthrough
+ - name: query_stats_passthrough_writeonly
+
+- <<: *linux_x86_dynamic_compile_variant_dependency
+ name: &enterprise-rhel-80-64-bit-large-txns-format enterprise-rhel-80-64-bit-large-txns-format
+ display_name: "Enterprise RHEL 8.0 (large transactions format)"
+ cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-small
+ expansions:
+ <<: *linux_x86_generic_expansions
+ test_flags: >-
+ --mongodSetParameters="{maxNumberOfTransactionOperationsInSingleOplogEntry: 2}"
+ --excludeWithAnyTags=exclude_from_large_txns
+ tasks:
+ - name: auth_gen
+ - name: auth_audit_gen
+ - name: causally_consistent_jscore_txns_passthrough
+ - name: change_streams
+ - name: change_streams_whole_db_passthrough
- name: change_streams_whole_cluster_passthrough
- name: concurrency_replication_gen
- name: concurrency_replication_multi_stmt_txn_gen
@@ -1739,9 +1604,11 @@ buildvariants:
- name: concurrency_sharded_with_stepdowns_and_balancer_gen
- name: concurrency_sharded_initial_sync_gen
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: jsCore
- name: jsCore_txns
- - name: .logical_session_cache .repl
- name: .multi_shard
- name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- name: multiversion_auth_gen
@@ -1791,43 +1658,55 @@ buildvariants:
# No feature flag tests since they aren't compatible with the older binaries.
test_flags: >-
--runNoFeatureFlagTests
- --excludeWithAnyTags=incompatible_with_shard_merge
+ --excludeWithAnyTags=incompatible_with_shard_merge,
-- <<: *linux-x86-multiversion-template
+# This variant exists because this is the only way to test future multiversion tags.
+# version_expansions_gen will pretend we are upgrading to "bv_future_git_tag",
+# which is like simulating a branching task.
+- &enterprise-rhel-80-64-bit-future-git-tag-multiversion-template
+ <<: *linux-x86-multiversion-template
name: &enterprise-rhel-80-64-bit-future-git-tag-multiversion enterprise-rhel-80-64-bit-future-git-tag-multiversion
display_name: "Enterprise RHEL 8.0 (future git tag multiversion)"
expansions:
- compile_flags: >-
- -j$(grep -c ^processor /proc/cpuinfo)
- --ssl
- --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- MONGO_DISTMOD=rhel80
- --link-model=dynamic
- multiversion_platform: rhel80
- multiversion_edition: enterprise
- repo_edition: enterprise
+ <<: *linux_x86_generic_expansions
scons_cache_scope: shared
scons_cache_mode: all
- num_scons_link_jobs_available: 0.99
- tooltags: "ssl sasl gssapi"
- build_mongoreplay: true
- large_distro_name: rhel80-medium
- resmoke_jobs_factor: 0.25
+ resmoke_jobs_factor: 0.5
bv_future_git_tag: r100.0.0-9999
+ compile_variant: linux-x86-dynamic-compile-future-tag-multiversion
test_flags: >-
--excludeWithAnyTags=future_git_tag_incompatible
- compile_variant: *enterprise-rhel-80-64-bit-future-git-tag-multiversion
+ depends_on:
+ - name: version_expansions_gen
+ variant: &enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen
+ - name: version_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this we are adding extra dependencies on evergreen and it is causing strain
+ omit_generated_tasks: true
+ - name: archive_dist_test
+ variant: linux-x86-dynamic-compile-future-tag-multiversion
+ tasks:
+ - name: .multiversion !.future_git_tag_incompatible
+ - name: .multiversion_future_git_tag
+ # This task does not work because it depends on archive_dist_test_debug
+ # Because we override the task dependencies in the future git tag variants, we can't have
+ # multiple tasks in one variant depend on multiple different compile variant tasks.
+ # If we decide we need this task, we can add it to its own variant that depends on archive_dist_test_debug.
+ # - name: generate_buildid_to_debug_symbols_mapping
+
+# This variant exists because this is the only way to correctly have
+# enterprise-rhel-80-64-bit-future-git-tag-multiversion depend on the "correct" version_expansions_gen task.
+# Without this extra variant, depending on version_expansions_gen will yield the version_expansions_gen task in version_gen.
+# Adding this variant removes that race condition.
+- <<: *enterprise-rhel-80-64-bit-future-git-tag-multiversion-template
+ name: *enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen
+ display_name: "Enterprise RHEL 8.0 (future git tag multiversion) (version gen)"
depends_on: null
tasks:
- name: version_expansions_gen
distros:
- rhel80-small
- - name: compile_test_and_package_parallel_core_stream_TG
- distros:
- - rhel80-xlarge
- - name: .multiversion !.future_git_tag_incompatible
- - name: .multiversion_future_git_tag
- - name: generate_buildid_to_debug_symbols_mapping
- name: &enterprise-rhel-80-64-bit-suggested enterprise-rhel-80-64-bit-suggested
display_name: "* Enterprise RHEL 8.0"
@@ -1873,7 +1752,6 @@ buildvariants:
- name: .jscore .common !.decimal !.sharding
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: libunwind_tests
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -1889,8 +1767,8 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2004-package
- # TODO: BF-24515 re-enable when build failure cause determined and resolved
- #- name: selinux_rhel8_enterprise
+ - name: vector_search
+ - name: selinux_rhel8_enterprise
- name: generate_buildid_to_debug_symbols_mapping
- name: &enterprise-rhel-80-64-bit-build-metrics enterprise-rhel-80-64-bit-build-metrics
@@ -1919,14 +1797,14 @@ buildvariants:
modules:
- enterprise
run_on:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
expansions:
compile_flags: >-
--ssl
MONGO_DISTMOD=rhel82
-j$(grep -c ^processor /proc/cpuinfo)
repo_edition: enterprise
- large_distro_name: amazon2022-arm64-large
+ large_distro_name: amazon2023.0-arm64-large
num_scons_link_jobs_available: 0.1
tasks:
- name: build_metrics_gen_TG
@@ -1996,35 +1874,6 @@ buildvariants:
- name: noPassthroughHotBackups_gen
- name: generate_buildid_to_debug_symbols_mapping
-- name: &ubuntu1804-container ubuntu1804-container
- display_name: "Ubuntu 18.04 Container"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- run_on:
- - ubuntu1804-container-server
- expansions:
- resmoke_jobs_factor: 1
- compile_variant: *ubuntu1804-container
- disable_shared_scons_cache: true
- compile_flags: >-
- MONGO_DISTMOD=ubuntu1804
- --opt=on
- -j$(grep -c ^processor /proc/cpuinfo)
- --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- tooltags: ""
- build_mongoreplay: true
- test_flags: >-
- --excludeWithAnyTags=requires_os_access
- tasks:
- - name: compile_and_archive_dist_test_then_package_TG
- distros:
- - ubuntu1804-build
- - name: jsCore
- - name: sharding_gen
- - name: replica_sets_gen
- - name: generate_buildid_to_debug_symbols_mapping
- distros:
- - ubuntu1804-build
-
- name: &enterprise-rhel-72-s390x-compile enterprise-rhel-72-s390x-compile
display_name: "Enterprise RHEL 7.2 s390x Compile"
modules:
@@ -2097,8 +1946,8 @@ buildvariants:
- &enterprise-amazon-linux2-arm64-all-feature-flags-template
<<: *amazon_linux2_arm64_compile_variant_dependency
name: &enterprise-amazon-linux2-arm64-all-feature-flags enterprise-amazon-linux2-arm64-all-feature-flags
- display_name: "* Amazon Linux 2 arm64 (all feature flags)"
- cron: "0 4 * * *" # From the ${project_required_suggested_cron} parameter
+ display_name: "! Amazon Linux 2 arm64 (all feature flags)"
+ cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter
modules:
- enterprise
run_on:
@@ -2119,15 +1968,10 @@ buildvariants:
--additionalFeatureFlagsFile all_feature_flags.txt
--excludeWithAnyTags=incompatible_with_amazon_linux,incompatible_with_shard_merge,requires_external_data_source
tasks:
- - name: cqf
- - name: cqf_disabled_pipeline_opt
- - name: cqf_passthrough
- - name: cqf_parallel
+ - name: analyze_shard_key_jscore_passthrough_gen
- name: query_golden_classic
- - name: query_golden_cqf
- name: lint_fuzzer_sanity_patch
- name: test_api_version_compatibility
- - name: burn_in_tests_gen
- name: check_feature_flag_tags
- name: check_for_todos
- name: .aggfuzzer
@@ -2135,19 +1979,22 @@ buildvariants:
- name: aggregation_repeat_queries
- name: audit
- name: .auth
- #- name: burn_in_tags_gen
- name: buildscripts_test
- name: resmoke_end2end_tests
- name: unittest_shell_hang_analyzer_gen
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .change_stream_fuzzer
+ # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites.
- name: change_streams_multitenant_passthrough
- name: change_streams_multitenant_sharded_collections_passthrough
+ - name: cqf
+ - name: cqf_disabled_pipeline_opt
+ - name: cqf_parallel
- name: .misc_js
- name: .clustered_collections
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
- name: .config_fuzzer !.large
- name: .config_fuzzer .large
distros:
@@ -2178,7 +2025,8 @@ buildvariants:
- name: .random_multiversion_ds
- name: .read_write_concern .large
- name: .read_write_concern !.large
- - name: .replica_sets !.encrypt !.auth
+ - name: .replica_sets !.encrypt !.auth !.ignore_non_generated_replica_sets_jscore_passthrough
+ - name: replica_sets_jscore_passthrough_gen
- name: replica_sets_api_version_jscore_passthrough_gen
- name: replica_sets_reconfig_jscore_passthrough_gen
- name: replica_sets_reconfig_jscore_stepdown_passthrough_gen
@@ -2192,112 +2040,31 @@ buildvariants:
- name: search_auth
- name: search_pinned_connections_auth
- name: search_ssl
+ - name: secondary_reads_passthrough_gen
+ - name: .serverless
+ distros:
+ - amazon2-arm64-large
- name: session_jscore_passthrough
+ - name: .shard_split
+ - name: .shard_merge
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: sharding_api_version_jscore_passthrough_gen
+ - name: sharding_api_strict_passthrough_gen
- name: .sharding .txns
- name: .sharding .common
- - name: sharded_multi_stmt_txn_jscore_passthrough
- - name: .serverless
- distros:
- - amazon2-arm64-large
- name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen
+ - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen
+ - name: sharded_multi_stmt_txn_jscore_passthrough
+ - name: streams
- name: .updatefuzzer
- - name: secondary_reads_passthrough_gen
- - name: .shard_split
- - name: .shard_merge
- - name: telemetry_passthrough
-
+ - name: vector_search
+ - name: query_stats_passthrough
+ - name: query_stats_passthrough_writeonly
###########################################
# Experimental buildvariants #
###########################################
-- name: &rhel80-debug-asan-classic-engine rhel80-debug-asan-classic-engine
- display_name: "* ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- modules:
- - enterprise
- run_on:
- - rhel80-build
- stepback: false
- expansions:
- additional_package_targets: >-
- archive-mongocryptd
- archive-mongocryptd-debug
- lang_environment: LANG=C
- san_options: *aubsan_options
- compile_flags: >-
- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
- --dbg=on
- --opt=on
- --allocator=system
- --sanitize=address
- --ssl
- --ocsp-stapling=off
- --enable-free-mon=on
- -j$(grep -c ^processor /proc/cpuinfo)
- compile_variant: *rhel80-debug-asan-classic-engine
- test_flags: >-
- --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
- --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling
- multiversion_platform: rhel80
- multiversion_edition: enterprise
- resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build.
- hang_analyzer_dump_core: false
- scons_cache_scope: shared
- exec_timeout_secs: 14400 # 3 hour timeout
- separate_debug: off
- large_distro_name: rhel80-build
- tasks:
- - name: compile_test_benchmark_and_package_serial_TG
- - name: .aggregation !.sbe_only
- - name: .auth
- - name: audit
- - name: .benchmarks
- - name: .causally_consistent !.wo_snapshot
- - name: .change_streams
- - name: .misc_js
- - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode
- - name: .encrypt
- - name: free_monitoring
- - name: external_auth
- - name: external_auth_aws
- - name: external_auth_oidc
- - name: initial_sync_fuzzer_gen
- - name: compile_integration_and_test_parallel_stream_TG
- distros:
- - rhel80-large
- - name: .jscore .common !.sbe_only
- - name: jsCore_min_batch_repeat_queries_ese_gsm
- - name: jsCore_txns_large_txns_format
- - name: json_schema
- - name: .logical_session_cache
- - name: .multi_shard .common
- - name: .query_fuzzer
- - name: .read_write_concern
- - name: replica_sets_large_txns_format_jscore_passthrough
- - name: .replica_sets !.multi_oplog
- - name: .replica_sets .encrypt
- - name: .resharding_fuzzer
- - name: .retry
- - name: .read_only
- - name: .rollbackfuzzer
- - name: .updatefuzzer
- - name: sasl
- - name: secondary_reads_passthrough_gen
- - name: session_jscore_passthrough
- - name: .sharding .jscore !.wo_snapshot
- - name: .sharding .common !.csrs !.jstestfuzz
- - name: .watchdog
- - name: .stitch
- - name: .serverless
- - name: unittest_shell_hang_analyzer_gen
- - name: .updatefuzzer
- - name: server_discovery_and_monitoring_json_test_TG
- - name: server_selection_json_test_TG
- - name: generate_buildid_to_debug_symbols_mapping
-
- &rhel80-debug-ubsan-all-feature-flags-template
name: &rhel80-debug-ubsan-all-feature-flags rhel80-debug-ubsan-all-feature-flags
display_name: "* Shared Library UBSAN Enterprise RHEL 8.0 DEBUG (all feature flags)"
@@ -2327,7 +2094,7 @@ buildvariants:
# To force disable feature flags even on the all feature flags variant, please use this file:
# buildscripts/resmokeconfig/fully_disabled_feature_flags.yml
test_flags: >-
- --excludeWithAnyTags=requires_ocsp_stapling
+ --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
--excludeWithAnyTags=incompatible_with_shard_merge
--additionalFeatureFlagsFile all_feature_flags.txt
multiversion_platform: rhel80
@@ -2342,62 +2109,66 @@ buildvariants:
- name: disk_wiredtiger
- name: generate_buildid_to_debug_symbols_mapping
-- name: &rhel80-debug-ubsan-classic-engine rhel80-debug-ubsan-classic-engine
- display_name: "* UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+- &rhel80-debug-aubsan-lite-template
+ <<: *linux_debug_aubsan_compile_variant_dependency
+ name: &rhel80-debug-aubsan-lite rhel80-debug-aubsan-lite
+ display_name: "* Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG"
+ cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter
modules:
- enterprise
run_on:
- rhel80-build
- stepback: false
- expansions:
- additional_package_targets: >-
- archive-mongocryptd
- archive-mongocryptd-debug
+ expansions: &aubsan-lite-required-expansions
+ compile_variant: *linux_debug_aubsan_compile_variant_name
lang_environment: LANG=C
san_options: *aubsan_options
- compile_variant: *rhel80-debug-ubsan-classic-engine
- compile_flags: >-
- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
- --dbg=on
- --opt=on
- --sanitize=undefined
- --ssl
- --ocsp-stapling=off
- --enable-free-mon=on
- -j$(grep -c ^processor /proc/cpuinfo)
- test_flags: >-
- --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
- --excludeWithAnyTags=requires_ocsp_stapling
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
+ resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build.
+ hang_analyzer_dump_core: false
+ max_sub_suites: 3
+ num_scons_link_jobs_available: 0.99
+ large_distro_name: rhel80-build
multiversion_platform: rhel80
multiversion_edition: enterprise
- resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build.
- scons_cache_scope: shared
- separate_debug: off
- large_distro_name: rhel80-build
+ gcov_tool: /opt/mongodbtoolchain/v4/bin/gcov
+
+ tasks:
+ - name: jsCore
+ - name: jsCore_txns
+
+- <<: *rhel80-debug-aubsan-lite-template
+ name: &rhel80-debug-aubsan-classic-engine rhel80-debug-aubsan-classic-engine
+ display_name: "* {A,UB}SAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+ expansions:
+ <<: *aubsan-lite-required-expansions
+ test_flags: >-
+ --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
+ --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
tasks:
- - name: compile_test_benchmark_and_package_serial_TG
- name: .aggregation !.sbe_only
- name: .auth
- name: audit
- - name: .benchmarks
- name: .causally_consistent !.wo_snapshot
- name: .change_streams
+ # - name: disk_wiredtiger
- name: .misc_js
- - name: .concurrency !.no_txns !.repl !.kill_terminate !.compute_mode
- - name: disk_wiredtiger
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode
- name: .encrypt
- name: free_monitoring
+ - name: external_auth
+ - name: external_auth_aws
+ - name: external_auth_oidc
- name: initial_sync_fuzzer_gen
- - name: compile_integration_and_test_parallel_stream_TG
- distros:
- - rhel80-large
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: .jscore .common !.sbe_only
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: json_schema
- - name: .logical_session_cache .one_sec
- name: .multi_shard .common
+ - name: .query_fuzzer
- name: .read_write_concern
- name: replica_sets_large_txns_format_jscore_passthrough
- name: .replica_sets !.multi_oplog
@@ -2411,41 +2182,12 @@ buildvariants:
- name: session_jscore_passthrough
- name: .sharding .jscore !.wo_snapshot
- name: .sharding .common !.csrs !.jstestfuzz
- - name: .stitch
- name: .updatefuzzer
- name: .serverless
- - name: watchdog_wiredtiger
- - name: server_discovery_and_monitoring_json_test_TG
- - name: server_selection_json_test_TG
+ - name: unittest_shell_hang_analyzer_gen
+ - name: .watchdog
- name: generate_buildid_to_debug_symbols_mapping
-- &rhel80-debug-aubsan-lite-template
- <<: *linux_debug_aubsan_compile_variant_dependency
- name: &rhel80-debug-aubsan-lite rhel80-debug-aubsan-lite
- display_name: "* Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG"
- cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter
- modules:
- - enterprise
- run_on:
- - rhel80-build
- expansions: &aubsan-lite-required-expansions
- compile_variant: *linux_debug_aubsan_compile_variant_name
- lang_environment: LANG=C
- san_options: *aubsan_options
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling
- resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build.
- hang_analyzer_dump_core: false
- max_sub_suites: 3
- num_scons_link_jobs_available: 0.99
- large_distro_name: rhel80-build
- multiversion_platform: rhel80
- multiversion_edition: enterprise
- gcov_tool: /opt/mongodbtoolchain/v4/bin/gcov
-
- tasks:
- - name: jsCore
- - name: jsCore_txns
-
- <<: *rhel80-debug-aubsan-lite-template
name: &rhel80-debug-aubsan-lite-all-feature-flags-required rhel80-debug-aubsan-lite-all-feature-flags-required
display_name: "! Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG (all feature flags)"
@@ -2480,33 +2222,37 @@ buildvariants:
- name: audit
- name: .aggregation
- name: .auth
+ - name: .concurrency !.no_txns !.repl !.kill_terminate
- name: .config_fuzzer
- name: .config_fuzzer_stress
- name: cqf
- name: cqf_disabled_pipeline_opt
- - name: cqf_passthrough
- name: cqf_parallel
- name: .causally_consistent !.wo_snapshot
- name: .change_streams
+ # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites.
- name: change_streams_multitenant_passthrough
- name: change_streams_multitenant_sharded_collections_passthrough
+ # - name: disk_wiredtiger
- name: external_auth
- name: external_auth_aws
- name: external_auth_oidc
- name: .encrypt
- name: free_monitoring
+ - name: generate_buildid_to_debug_symbols_mapping
- name: initial_sync_fuzzer_gen
+ - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
- name: .jscore .common
- name: jsCore_column_store_indexes
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: jsCore_wildcard_indexes
- name: json_schema
- - name: .logical_session_cache
- name: .misc_js
- name: .multi_shard .common
- name: query_golden_classic
- - name: query_golden_cqf
- name: .query_fuzzer
- name: .read_write_concern
- name: replica_sets_large_txns_format_jscore_passthrough
@@ -2525,7 +2271,8 @@ buildvariants:
- name: .serverless
- name: .shard_split
- name: .shard_merge
- - name: telemetry_passthrough
+ - name: query_stats_passthrough
+ - name: query_stats_passthrough_writeonly
- name: .updatefuzzer
- name: .watchdog
@@ -2575,38 +2322,43 @@ buildvariants:
large_distro_name: rhel80-medium
multiversion_platform: rhel80
multiversion_edition: enterprise
+ test_flags: >-
+ --excludeWithAnyTags=tsan_incompatible
tasks:
- name: compile_test_and_package_serial_TG
# - name: compile_integration_and_test_parallel_stream_TG # Not passing
# - name: test_api_version_compatibility # Not relevant for TSAN
- # - name: burn_in_tests_gen # No burn in tests needed
- name: check_feature_flag_tags
- # - name: .aggfuzzer !.feature_flag_guarded # Not passing
+ - name: .aggfuzzer !.feature_flag_guarded
# - name: .aggregation !.feature_flag_guarded # Not passing
- name: audit
# - name: .auth # Not passing
# - name: burn_in_tags_gen # No burn in tests needed
+ # depends_on:
+ # - name: version_burn_in_gen
+ # variant: generate-tasks-for-version
+ # omit_generated_tasks: true
+ # - name: archive_dist_test_debug
+ # variant: *enterprise-rhel80-debug-tsan
- name: buildscripts_test
- # - name: resmoke_end2end_tests # Not passing
# - name: unittest_shell_hang_analyzer_gen # Not passing
# - name: .config_fuzzer # Not passing
- name: config_fuzzer_jsCore
- name: cqf
- name: cqf_disabled_pipeline_opt
- - name: cqf_passthrough
- name: cqf_parallel
# - name: .causally_consistent !.sharding # Not passing
# - name: .change_streams # Not passing
# - name: .change_stream_fuzzer # Not passing
# - name: .misc_js # Not passing
- # - name: .concurrency !.large !.ubsan !.no_txns !.debug_only # Not passing
- # - name: .concurrency .large !.ubsan !.no_txns !.debug_only # Not passing
+ # - name: .concurrency !.large !.ubsan !.no_txns # Not passing
+ # - name: .concurrency .large !.ubsan !.no_txns # Not passing
# distros:
# - rhel80-medium
- name: disk_wiredtiger
# - name: .encrypt # Not passing
# - name: idl_tests # Not relevant for TSAN
- # - name: initial_sync_fuzzer_gen # Not passing
+ - name: initial_sync_fuzzer_gen
# distros:
# - rhel80-medium
- name: jsCore
@@ -2616,7 +2368,7 @@ buildvariants:
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: json_schema
- # - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. # Not passing
+ - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer.
# - name: libunwind_tests # Cant be used because tsan does not use libunwind
# - name: .multiversion_sanity_check # Multiversion does not pass yet, also making this work is going to be pretty tricky
- name: mqlrun
@@ -2625,7 +2377,6 @@ buildvariants:
# - name: multiversion_gen # Multiversion does not pass yet, also making this work is going to be pretty tricky
- name: .query_fuzzer
- name: query_golden_classic
- - name: query_golden_cqf
# - name: .random_multiversion_ds # Multiversion does not pass yet, also making this work is going to be pretty tricky
# - name: .read_write_concern .large # Not passing
# distros:
@@ -2646,9 +2397,8 @@ buildvariants:
# - name: retryable_writes_jscore_stepdown_passthrough_gen # Not passing
# distros:
# - rhel80-medium
- - name: read_only # TODO: replace with .read_only after read_only_sharded is fixed
- # - name: .read_only # Not passing, see above
- # - name: .rollbackfuzzer # Not passing
+ - name: .read_only
+ - name: .rollbackfuzzer
- name: sasl
- name: search
- name: search_auth
@@ -2673,6 +2423,7 @@ buildvariants:
- name: server_selection_json_test_TG
distros:
- rhel80-xlarge
+ - name: vector_search
- name: generate_buildid_to_debug_symbols_mapping
- &enterprise-rhel80-debug-complete-tsan-template
@@ -2691,24 +2442,20 @@ buildvariants:
tasks:
- name: compile_test_and_package_serial_TG
- name: compile_integration_and_test_parallel_stream_TG
- - name: .aggfuzzer !.feature_flag_guarded
- name: .aggregation !.feature_flag_guarded
- name: .auth
- - name: resmoke_end2end_tests
- name: unittest_shell_hang_analyzer_gen
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.compute_mode
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.compute_mode
+ - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode
+ - name: .concurrency .large !.ubsan !.no_txns !.compute_mode
distros:
- rhel80-large
- name: .config_fuzzer
- name: .encrypt
- - name: initial_sync_fuzzer_gen
- name: .jscore .common !jsCore
- - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer.
# - name: libunwind_tests # Cant be used because tsan does not use libunwind
# - name: .multiversion_sanity_check # Multiversion does not pass yet, also making this work is going to be pretty tricky
- name: .multi_shard
@@ -2734,8 +2481,6 @@ buildvariants:
- name: retryable_writes_jscore_stepdown_passthrough_gen
distros:
- rhel80-medium
- - name: .read_only
- - name: .rollbackfuzzer
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: sharding_api_version_jscore_passthrough_gen
- name: .sharding .txns
@@ -2853,7 +2598,6 @@ buildvariants:
- name: .jscore .common
- name: noPassthrough_gen
- name: noPassthroughWithMongod_gen
- - name: .logical_session_cache .one_sec
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: .sharding .common !.csrs !.encrypt
- name: sharding_max_mirroring_opportunistic_secondary_targeting_gen
@@ -2897,7 +2641,6 @@ buildvariants:
- name: .sharding .causally_consistent !.wo_snapshot
- name: .concurrency .common !.kill_terminate
- name: .jscore .common
- - name: .logical_session_cache .one_sec
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: .sharding .common !.csrs !.encrypt
- name: sharding_max_mirroring_opportunistic_secondary_targeting_gen
@@ -2940,7 +2683,6 @@ buildvariants:
- name: .sharding .causally_consistent !.wo_snapshot
- name: .concurrency .common !.kill_terminate
- name: .jscore .common
- - name: .logical_session_cache .one_sec
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: .sharding .common !.csrs !.encrypt
- name: sharding_max_mirroring_opportunistic_secondary_targeting_gen
@@ -2988,7 +2730,6 @@ buildvariants:
- name: .sharding .causally_consistent !.wo_snapshot
- name: .concurrency .common !.kill_terminate
- name: .jscore .common
- - name: .logical_session_cache .one_sec
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: .sharding .common !.csrs !.encrypt
- name: sharding_max_mirroring_opportunistic_secondary_targeting_gen
@@ -3036,7 +2777,6 @@ buildvariants:
- name: .sharding .causally_consistent !.wo_snapshot
- name: .concurrency .common !.kill_terminate
- name: .jscore .common
- - name: .logical_session_cache .one_sec
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
- name: .sharding .common !.csrs !.encrypt
- name: sharding_max_mirroring_opportunistic_secondary_targeting_gen
@@ -3083,8 +2823,6 @@ buildvariants:
patch_only: true
- name: .jscore .common
patch_only: true
- - name: .logical_session_cache .one_sec
- patch_only: true
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
patch_only: true
- name: .sharding .common !.csrs !.encrypt
@@ -3112,20 +2850,20 @@ buildvariants:
tasks:
- name: win_shared_scons_cache_pruning
-- <<: *enterprise-rhel-80-64-bit-dynamic-template
+- <<: *enterprise-amazon-linux2-arm64-all-feature-flags-template
name: &commit-queue commit-queue
display_name: "~ Commit Queue"
cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter
stepback: false
expansions:
- <<: *linux_x86_generic_expansions
+ <<: *linux_arm64_generic_expansions
scons_cache_scope: shared
scons_cache_mode: all
- commit_queue_alternate_cache: linux-x86-dynamic-compile-required
+ commit_queue_alternate_cache: amazon-linux2-arm64-compile
has_packages: false
compile_flags: >-
--ssl
- MONGO_DISTMOD=rhel80
+ MONGO_DISTMOD=amazon2
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
--link-model=dynamic
@@ -3139,26 +2877,28 @@ buildvariants:
depends_on: []
tasks:
- name: compile_ninja_quick_TG
- distros:
- - rhel80-xlarge-commitqueue
- name: compile_test_and_package_parallel_core_stream_TG
distros:
- - rhel80-xlarge-commitqueue
+ - amazon2-arm64-xlarge-commitqueue
- name: compile_test_and_package_parallel_unittest_stream_TG
distros:
- - rhel80-xlarge-commitqueue
+ - amazon2-arm64-xlarge-commitqueue
- name: compile_test_and_package_parallel_dbtest_stream_TG
distros:
- - rhel80-xlarge-commitqueue
+ - amazon2-arm64-xlarge-commitqueue
- name: jsCore
distros:
- - rhel80-xlarge-commitqueue
+ - amazon2-arm64-large
- name: .lint
- name: test_api_version_compatibility
- name: validate_commit_message
+ - name: lint_large_files_check
- name: check_feature_flag_tags
- name: compile_venv_deps_check
- name: resmoke_validation_tests
+ - name: version_gen_validation
+ distros:
+ - ubuntu2004-small
- name: &windows-dynamic-visibility-test windows-dynamic-visibility-test
display_name: "~ Shared Library Windows (visibility test)"
@@ -3185,38 +2925,6 @@ buildvariants:
### QO & QE Patch-Specific Build Variants ###
-# The CQF feature flag is currently part of the always-disabled feature flags list, so it is not
-# enabled in all-feature-flags variants besides this one. This variant allows us to get some initial
-# coverage for CQF without disrupting coverage for other feature flags (in particular, SBE).
-# TODO SERVER-71163: Remove this variant once the CQF feature flag is not always-disabled.
-- <<: *linux_x86_dynamic_compile_variant_dependency
- name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-and-cqf-enabled-patch-only
- display_name: "Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags and CQF enabled)"
- cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
- modules:
- - enterprise
- run_on:
- - rhel80-small
- stepback: false
- expansions:
- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-expansions
- test_flags: >-
- --excludeWithAnyTags=cqf_incompatible
- --excludeWithAnyTags=resource_intensive
- --mongosSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}"
- --mongodSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}"
- tasks:
- - name: .aggregation .common
- - name: causally_consistent_jscore_txns_passthrough
- - name: cqf
- - name: cqf_disabled_pipeline_opt
- - name: cqf_parallel
- - name: .jscore .common
- - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer.
- - name: noPassthrough_gen
- - name: query_golden_cqf
- - name: retryable_writes_jscore_passthrough_gen
-
- <<: *enterprise-rhel-80-64-bit-dynamic-classic-engine
name: &enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (Classic Engine)"
@@ -3231,7 +2939,7 @@ buildvariants:
--mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
--excludeWithAnyTags=resource_intensive
-- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template
+- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template
name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags)"
cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
@@ -3246,7 +2954,8 @@ buildvariants:
--excludeWithAnyTags=resource_intensive
--excludeWithAnyTags=incompatible_with_shard_merge
-- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template
+### Security Patch-Specific Build Variants ###
+- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template
name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-security-patch-only enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-security-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Security Patch Only (all feature flags)"
cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
@@ -3256,14 +2965,33 @@ buildvariants:
max_sub_suites: 15
tasks:
- name: burn_in_tests_gen
+ depends_on:
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ omit_generated_tasks: true
+ - name: archive_dist_test_debug
+ variant: *linux_x86_dynamic_compile_variant_name
- name: .audit .patch_build
- - name: .sasl .patch_build
- name: .encrypt .patch_build
+ - name: .sasl .patch_build
- name: external_auth
- name: external_auth_aws
- name: external_auth_oidc
- name: lint_fuzzer_sanity_patch
+- <<: *enterprise-windows-template
+ name: &windows-compile-security-patch-only windows-compile-security-patch-only
+ display_name: "~ Windows Security Patch Only"
+ cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
+ expansions:
+ <<: *windows_required_expansions
+ exe: ".exe"
+ tasks:
+ - name: .encrypt .patch_build
+ - name: .sasl .patch_build
+ - name: external_auth_aws
+ - name: external_auth_oidc
+
- name: &enterprise-ubuntu1804-64-libvoidstar enterprise-ubuntu1804-64-libvoidstar
display_name: "~ Enterprise Ubuntu 18.04 w/ libvoidstar"
modules:
@@ -3329,43 +3057,3 @@ buildvariants:
- name: compile_dist_test_TG
distros:
- windows-vsCurrent-large
-
-- &rhel80-debug-aubsan-lite_fuzzer-template
- name: &rhel80-debug-aubsan-lite_fuzzer rhel80-debug-aubsan-lite_fuzzer
- display_name: "{A,UB}SAN Enterprise RHEL 8.0 FUZZER"
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- modules:
- - enterprise
- run_on:
- - rhel80-build
- stepback: false
- expansions:
- additional_package_targets: >-
- archive-mongocryptd
- archive-mongocryptd-debug
- lang_environment: LANG=C
- toolchain_version: stable
- # If you add anything to san_options, make sure the appropriate changes are
- # also made to SConstruct.
- san_options: *aubsan_options
- compile_flags: >-
- LINKFLAGS=-nostdlib++
- LIBS=stdc++
- --variables-files=etc/scons/mongodbtoolchain_${toolchain_version}_clang.vars
- --dbg=on
- --opt=on
- --allocator=system
- --sanitize=undefined,address,fuzzer
- --ssl
- --ocsp-stapling=off
- -j$(grep -c ^processor /proc/cpuinfo)
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling
- resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build.
- hang_analyzer_dump_core: false
- scons_cache_scope: shared
- separate_debug: off
- compile_variant: *rhel80-debug-aubsan-lite_fuzzer
- display_tasks:
- - *libfuzzertests
- tasks:
- - name: compile_archive_and_run_libfuzzertests_TG
diff --git a/etc/evergreen_nightly.yml b/etc/evergreen_nightly.yml
index 548a69d6ecb93..4736899719170 100644
--- a/etc/evergreen_nightly.yml
+++ b/etc/evergreen_nightly.yml
@@ -5,7 +5,7 @@ include:
- filename: etc/evergreen_yml_components/variants/task_generation.yml
- filename: etc/evergreen_yml_components/variants/atlas.yml
- filename: etc/evergreen_yml_components/variants/misc_release.yml
-### Comment out when using this file for a LTS or Rapid release branch. ###
+### Comment out when using this file for a Rapid release branch. ###
- filename: etc/evergreen_yml_components/variants/ibm.yml
### Uncomment when using this file for a LTS release branch. ###
# - filename: etc/evergreen_yml_components/variants/in_memory.yml
@@ -13,6 +13,10 @@ include:
# - filename: etc/evergreen_yml_components/variants/sanitizer.yml
### Uncomment when using this file for a LTS or Rapid release branch. ###
# - filename: etc/evergreen_yml_components/variants/ninja.yml
+### Uncomment when using this file for a LTS or Rapid release branch. ###
+# - filename: etc/evergreen_yml_components/variants/classic_engine.yml
+### Uncomment when using this file for a LTS or Rapid release branch. ###
+# - filename: etc/evergreen_yml_components/variants/config_shard.yml
parameters:
diff --git a/etc/evergreen_timeouts.yml b/etc/evergreen_timeouts.yml
index c86d764470582..e7e35fe26f65a 100644
--- a/etc/evergreen_timeouts.yml
+++ b/etc/evergreen_timeouts.yml
@@ -12,41 +12,37 @@
overrides:
enterprise-macos:
- - task: concurrency
- idle_timeout: 15
- task: replica_sets_jscore_passthrough
exec_timeout: 150 # 2.5 hours
enterprise-macos-arm64:
- - task: concurrency
- idle_timeout: 15
- task: replica_sets_jscore_passthrough
exec_timeout: 150 # 2.5 hours
- enterprise-rhel-80-64-bit-coverage:
- - task: replica_sets_jscore_passthrough
- exec_timeout: 150 # 2.5 hours.
+ enterprise-rhel80-debug-complete-tsan:
+ - task: aggregation_timeseries_fuzzer
+ exec_timeout: 150 # 2.5 hours
+
+ enterprise-rhel80-debug-tsan:
+ - task: aggregation_timeseries_fuzzer
+ exec_timeout: 150 # 2.5 hours
macos:
- - task: concurrency
- idle_timeout: 15
- task: replica_sets_jscore_passthrough
exec_timeout: 150 # 2.5 hours
macos-arm64:
- - task: concurrency
- idle_timeout: 15
- task: replica_sets_jscore_passthrough
exec_timeout: 150 # 2.5 hours
+ rhel80-asan:
+ - task: aggregation_timeseries_fuzzer
+ exec_timeout: 150 # 2.5 hours
+
rhel80-debug-ubsan:
- task: update_timeseries_fuzzer
exec_timeout: 150 # 2.5 hours
- rhel80-debug-suggested:
- - task: replica_sets_jscore_passthrough
- exec_timeout: 180 # 3 hours.
-
rhel80-debug-ubsan-classic-engine:
- task: update_timeseries_fuzzer
exec_timeout: 150 # 2.5 hours
@@ -54,3 +50,7 @@ overrides:
rhel80-debug-aubsan-all-feature-flags:
- task: update_timeseries_fuzzer
exec_timeout: 150 # 2.5 hours
+
+ ubuntu1804-asan:
+ - task: aggregation_timeseries_fuzzer
+ exec_timeout: 150 # 2.5 hours
diff --git a/etc/evergreen_yml_components/definitions.yml b/etc/evergreen_yml_components/definitions.yml
index b2cd5bc1bdd11..175ba9caaed2b 100644
--- a/etc/evergreen_yml_components/definitions.yml
+++ b/etc/evergreen_yml_components/definitions.yml
@@ -45,6 +45,9 @@ parameters:
- key: antithesis_image_tag
description: "The docker tag to use when pushing images to Antithesis"
+- key: build_patch_id
+ description: "Patch id of evergreen patch to pull binaries from for testing."
+
## Cron parameters.
- key: project_required_suggested_cron
value: "0 */4 * * *" # Every 4 hours starting at 0000 UTC
@@ -96,6 +99,9 @@ variables:
depends_on:
- name: version_gen
variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this, we add extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
- name: archive_dist_test_debug
commands:
- func: "generate resmoke tasks"
@@ -105,10 +111,11 @@ variables:
- &gen_burn_in_task_template
name: gen_burn_in_task_template
depends_on:
- - name: version_gen
- variant: generate-tasks-for-version
- name: version_burn_in_gen
variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this, we add extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
- name: archive_dist_test_debug
commands:
- func: "generate resmoke tasks"
@@ -158,7 +165,12 @@ variables:
is_jstestfuzz: true
num_files: 15
num_tasks: 5 # Upperbound by `max_sub_suites` specified through the variant with each task still running `num_files` files.
- resmoke_args: --help # resmoke_args needs to be overridden to specify one of the jstestfuzz suites
+ # It is error-prone to require each fuzzer-related Evergreen task to override the
+ # resmoke_args variable. However, the resmoke_args variable must be defined as a string in the
+ # task generation configuration to satisfy mongodb/mongo-task-generator. We therefore specify an
+ # empty string for the variable to reflect that there are no additional arguments provided to
+ # resmoke by default for the fuzzer-related tasks.
+ resmoke_args: ""
resmoke_jobs_max: 1
should_shuffle: false
continue_on_failure: false
@@ -175,6 +187,9 @@ variables:
depends_on:
- name: version_gen
variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this, we add extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
- archive_dist_test_debug
commands:
- func: "generate resmoke tasks"
@@ -240,7 +255,6 @@ variables:
# "set up venv".
- func: "set up venv"
- func: "upload pip requirements"
- - func: "get all modified patch files"
- func: "f_expansions_write"
- func: "configure evergreen api credentials"
- func: "get buildnumber"
@@ -291,8 +305,8 @@ variables:
- mongocryptd_variants: &mongocryptd_variants
- enterprise-amazon2
- enterprise-amazon2-arm64
- - enterprise-amazon2022
- - enterprise-amazon2022-arm64
+ - enterprise-amazon2023
+ - enterprise-amazon2023-arm64
- enterprise-debian10-64
- enterprise-debian11-64
- enterprise-linux-64-amazon-ami
@@ -338,8 +352,8 @@ variables:
- enterprise-rhel-90-64-bit
- enterprise-rhel-90-arm64
- enterprise-amazon2-arm64
- - enterprise-amazon2022
- - enterprise-amazon2022-arm64
+ - enterprise-amazon2023
+ - enterprise-amazon2023-arm64
- enterprise-ubuntu1804-64
- enterprise-ubuntu2004-64
- enterprise-ubuntu2204-64
@@ -350,12 +364,12 @@ variables:
- amazon
- enterprise-linux-64-amazon-ami
- amazon2
- - amazon2022
- - amazon2022-arm64
+ - amazon2023
+ - amazon2023-arm64
- enterprise-amazon2
- enterprise-amazon2-arm64
- - enterprise-amazon2022
- - enterprise-amazon2022-arm64
+ - enterprise-amazon2023
+ - enterprise-amazon2023-arm64
- debian10
- debian11
- enterprise-debian10-64
@@ -529,7 +543,12 @@ functions:
params:
binary: bash
args:
- - "src/evergreen/functions/binaries_extract.sh"
+ - "src/evergreen/run_python_script.sh"
+ - "evergreen/functions/binaries_extract.py"
+ - "--tarball=mongo-binaries.tgz"
+ - "--extraction-command=${decompress}"
+ - "--change-dir=${extraction_change_dir}"
+ - "${move_outputs}"
"get version expansions": &get_version_expansions
command: s3.get
@@ -567,16 +586,6 @@ functions:
extract_to: src/corpora
remote_file: ${mongo_fuzzer_corpus}
- "fetch legacy corpus": &fetch_legacy_corpus
- command: s3.get
- params:
- aws_key: ${s3_access_key_id}
- aws_secret: ${s3_secret_access_key}
- bucket: fuzzer-artifacts
- # Extract the legacy corpora to the merge directory to synthesize together until we burn in.
- extract_to: src/corpora-merged
- remote_file: ${project}/corpus/mongo-${build_variant}-latest.tgz
-
"archive new corpus": &archive_new_corpus
command: archive.targz_pack
params:
@@ -732,23 +741,6 @@ functions:
args:
- "./src/evergreen/functions/shared_scons_directory_umount.sh"
- "get all modified patch files":
- - *f_expansions_write
- - command: subprocess.exec
- params:
- binary: bash
- args:
- - "./src/evergreen/functions/modified_patch_files_get_all.sh"
-
- # This function should only be called from patch-build-only tasks.
- "get added and modified patch files":
- - *f_expansions_write
- - command: subprocess.exec
- params:
- binary: bash
- args:
- - "./src/evergreen/functions/added_and_modified_patch_files_get.sh"
-
"determine resmoke jobs": &determine_resmoke_jobs
command: subprocess.exec
params:
@@ -1084,6 +1076,30 @@ functions:
files:
- src/generated_resmoke_config/*.json
+ "generate version validation":
+ - *f_expansions_write
+ - *configure_evergreen_api_credentials
+ - command: subprocess.exec
+ type: test
+ params:
+ binary: bash
+ args:
+ - "./src/evergreen/generate_version.sh"
+ - command: archive.targz_pack
+ params:
+ target: generate_tasks_config.tgz
+ source_dir: src/generated_resmoke_config
+ include:
+ - "*"
+ - command: subprocess.exec
+ type: test
+ params:
+ binary: bash
+ args:
+ - "src/evergreen/run_python_script.sh"
+ - "buildscripts/validate_file_size.py"
+ - "generate_tasks_config.tgz"
+
"generate version burn in":
- *f_expansions_write
- *configure_evergreen_api_credentials
@@ -1124,6 +1140,11 @@ functions:
files:
- src/generated_resmoke_config/*.json
+ "initialize multiversion tasks": &initialize_multiversion_tasks
+ - command: shell.exec
+ params:
+ script: "echo 'noop'"
+
"generate resmoke tasks":
- *fetch_artifacts
- *f_expansions_write
@@ -1477,7 +1498,8 @@ functions:
params:
binary: bash
args:
- - "./src/evergreen/lint_fuzzer_sanity_patch.sh"
+ - "src/evergreen/run_python_script.sh"
+ - "evergreen/lint_fuzzer_sanity_patch.py"
"lint fuzzer sanity all":
- *f_expansions_write
@@ -1516,8 +1538,7 @@ functions:
local_file: jstests.tgz
remote_file: ${project}/${build_variant}/${revision}/jstestfuzz/${task_id}-${execution}.tgz
bucket: mciuploads
- permissions: private
- visibility: signed
+ permissions: public-read
content_type: application/gzip
display_name: Generated Tests - Execution ${execution}
@@ -1571,6 +1592,18 @@ functions:
visibility: signed
content_type: text/javascript
display_name: Minimized jstestfuzz Test - Execution ${execution}
+ - command: s3.put
+ params:
+ optional: true
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/statistics-report.json
+ remote_file: ${project}/${build_variant}/${revision}/artifacts/statistics-report-${task_id}-${execution}.json
+ bucket: mciuploads
+ permissions: private
+ visibility: signed
+ content_type: application/json
+ display_name: Statistics Report - Execution ${execution}
- *f_expansions_write
- command: subprocess.exec
params:
@@ -1725,8 +1758,8 @@ functions:
args:
- "./src/evergreen/selinux_run_test.sh"
env:
- test_list: ${test_list}
- user: ec2-user
+ TEST_LIST: ${test_list}
+ SELINUX_USER: ec2-user
### Process & archive remote EC2 artifacts ###
@@ -2349,7 +2382,7 @@ functions:
bucket: mciuploads
permissions: public-read
content_type: text/plain
- display_name: Generated multiversion exclude tags options
+ display_name: multiversion_exclude_tags.yml from resmoke invocation
# Pre task steps
pre:
@@ -2410,6 +2443,21 @@ tasks:
task_compile_flags: >-
PREFIX=dist-test
+## compile - build the first half of all scons targets except unittests ##
+- name: compile_dist_test_half
+ tags: []
+ depends_on:
+ - name: version_expansions_gen
+ variant: generate-tasks-for-version
+ commands:
+ - func: "scons compile"
+ vars:
+ targets: >-
+ compile_first_half_non_test_source
+ ${additional_compile_targets|}
+ task_compile_flags: >-
+ PREFIX=dist-test
+
- name: compile_upload_benchmarks
tags: []
depends_on:
@@ -2770,6 +2818,16 @@ tasks:
targets:
install-core
+- name: iwyu_self_test
+ tags: []
+ commands:
+ - command: subprocess.exec
+ params:
+ binary: bash
+ args:
+ - "src/evergreen/run_python_script.sh"
+ - "buildscripts/iwyu/test/run_tests.py"
+
- name: libdeps_graph_linting
tags: []
depends_on:
@@ -2938,6 +2996,46 @@ tasks:
suite: unittests
install_dir: build/install/bin
+## pretty_printer ##
+- <<: *task_template
+ name: run_pretty_printer_tests
+ tags: []
+ commands:
+ - func: "git get project and add git tag"
+ - *f_expansions_write
+ - *kill_processes
+ - *cleanup_environment
+ - func: "set up venv"
+ - func: "upload pip requirements"
+ - func: "configure evergreen api credentials"
+ - func: "do setup"
+ vars:
+ extraction_change_dir: build/install/
+ - command: s3.get
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ remote_file: ${mongo_debugsymbols}
+ bucket: mciuploads
+ local_file: src/mongo-debugsymbols.tgz
+ optional: true
+ - command: subprocess.exec
+ params:
+ binary: bash
+ args:
+ - "src/evergreen/run_python_script.sh"
+ - "evergreen/functions/binaries_extract.py"
+ - "--tarball=mongo-debugsymbols.tgz"
+ - "--extraction-command=${decompress}"
+ - "--change-dir=build/install/"
+ - "--move-output=build/install/dist-test/pretty_printer_tests.txt:build/"
+ optional: true
+ - func: "run tests"
+ vars:
+ suite: pretty-printer-tests
+ install_dir: build/install/dist-test/bin
+
+
## run_unittests with UndoDB live-record ##
#- name: run_unittests_with_recording
# depends_on:
@@ -2995,8 +3093,6 @@ tasks:
- name: fetch_and_run_libfuzzertests
tags: []
commands:
- - func: "fetch corpus"
- - func: "fetch legacy corpus"
- func: "run tests"
vars:
suite: libfuzzer
@@ -3075,6 +3171,7 @@ tasks:
vars:
targets: install-integration-tests
compiling_for_test: true
+ separate_debug: off
- name: compile_visibility_test
tags: []
@@ -3484,7 +3581,7 @@ tasks:
- func: "run tests"
vars:
suite: benchmarks
- exec_timeout_secs: 18000 # 5 hour timeout.
+ exec_timeout_secs: 21600 # 6 hour timeout.
resmoke_jobs_max: 1
- func: "send benchmark results"
# - func: "analyze benchmark results"
@@ -3527,6 +3624,17 @@ tasks:
resmoke_jobs_max: 1
- func: "send benchmark results"
+- <<: *benchmark_template
+ name: benchmarks_query
+ tags: ["benchmarks"]
+ commands:
+ - func: "do benchmark setup"
+ - func: "run tests"
+ vars:
+ suite: benchmarks_query
+ resmoke_jobs_max: 1
+ - func: "send benchmark results"
+
- <<: *benchmark_template
name: benchmarks_expression
tags: ["benchmarks"]
@@ -3690,6 +3798,10 @@ tasks:
name: initial_sync_multiversion_fuzzer_gen
tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ initial_sync_fuzzer_last_lts: last_lts
+ initial_sync_fuzzer_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
@@ -3719,12 +3831,15 @@ tasks:
name: aggregation_multiversion_fuzzer_gen
tags: ["aggfuzzer", "common", "multiversion", "require_npm", "random_name", "future_git_tag_incompatible"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ aggregation_expression_multiversion_fuzzer_last_lts: last_lts
+ aggregation_expression_multiversion_fuzzer_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
num_files: 5
num_tasks: 5
- suite: generational_fuzzer
resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'"
npm_command: agg-fuzzer
run_no_feature_flag_tests: "true"
@@ -3734,12 +3849,15 @@ tasks:
name: aggregation_expression_multiversion_fuzzer_gen
tags: ["aggfuzzer", "multiversion", "require_npm", "random_name"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ aggregation_multiversion_fuzzer_last_lts: last_lts
+ aggregation_multiversion_fuzzer_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
num_files: 5
num_tasks: 5
- suite: generational_fuzzer
resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'"
npm_command: agg-expr-fuzzer
run_no_feature_flag_tests: "true"
@@ -3884,13 +4002,16 @@ tasks:
name: update_fuzzer_gen
tags: ["updatefuzzer", "require_npm", "random_name", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ update_fuzzer_last_lts: last_lts
+ update_fuzzer_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
num_files: 5
num_tasks: 5
npm_command: update-fuzzer
- suite: update_fuzzer
resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'"
run_no_feature_flag_tests: "true"
@@ -3914,13 +4035,16 @@ tasks:
name: update_fuzzer_replication_gen
tags: ["updatefuzzer", "require_npm", "random_name", "multiversion", "no_debug_mode"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ update_fuzzer_replication_last_lts: last_lts
+ update_fuzzer_replication_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
num_files: 5
num_tasks: 5
npm_command: update-fuzzer
- suite: update_fuzzer_replication
resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'"
run_no_feature_flag_tests: "true"
@@ -3929,6 +4053,10 @@ tasks:
name: rollback_multiversion_fuzzer_gen
tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ rollback_fuzzer_last_lts: last_lts
+ rollback_fuzzer_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
@@ -4245,6 +4373,14 @@ tasks:
name: jstestfuzz_replication_multiversion_gen
tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ jstestfuzz_replication_last_continuous_new_new_old: last_continuous
+ jstestfuzz_replication_last_continuous_new_old_new: last_continuous
+ jstestfuzz_replication_last_continuous_old_new_new: last_continuous
+ jstestfuzz_replication_last_lts_new_new_old: last_lts
+ jstestfuzz_replication_last_lts_new_old_new: last_lts
+ jstestfuzz_replication_last_lts_old_new_new: last_lts
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
@@ -4312,6 +4448,10 @@ tasks:
name: jstestfuzz_sharded_multiversion_gen
tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ jstestfuzz_sharded_last_continuous_new_old_old_new: last_continuous
+ jstestfuzz_sharded_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
<<: *jstestfuzz_config_vars
@@ -4445,6 +4585,14 @@ tasks:
name: multiversion_sanity_check_gen
tags: ["multiversion", "multiversion_sanity_check"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ multiversion_sanity_check_last_continuous_new_new_old: last_continuous
+ multiversion_sanity_check_last_continuous_new_old_new: last_continuous
+ multiversion_sanity_check_last_continuous_old_new_new: last_continuous
+ multiversion_sanity_check_last_lts_new_new_old: last_lts
+ multiversion_sanity_check_last_lts_new_old_new: last_lts
+ multiversion_sanity_check_last_lts_old_new_new: last_lts
- func: "generate resmoke tasks"
vars:
run_no_feature_flag_tests: "true"
@@ -4453,6 +4601,14 @@ tasks:
name: replica_sets_jscore_multiversion_gen
tags: ["multiversion", "multiversion_passthrough"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ replica_sets_jscore_passthrough_last_continuous_new_new_old: last_continuous
+ replica_sets_jscore_passthrough_last_continuous_new_old_new: last_continuous
+ replica_sets_jscore_passthrough_last_continuous_old_new_new: last_continuous
+ replica_sets_jscore_passthrough_last_lts_new_new_old: last_lts
+ replica_sets_jscore_passthrough_last_lts_new_old_new: last_lts
+ replica_sets_jscore_passthrough_last_lts_old_new_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: replica_sets_jscore_passthrough
@@ -4470,7 +4626,6 @@ tasks:
- *kill_processes
- *cleanup_environment
- *set_up_venv
- - func: "get added and modified patch files"
- func: "setup jstestfuzz"
- func: "lint fuzzer sanity patch"
@@ -4641,6 +4796,18 @@ tasks:
- func: "upload pip requirements"
- func: "generate version"
+- name: version_gen_validation
+ commands:
+ - command: manifest.load
+ - *git_get_project
+ - *f_expansions_write
+ - *add_git_tag
+ - *kill_processes
+ - *cleanup_environment
+ - func: "set up venv"
+ - func: "upload pip requirements"
+ - func: "generate version validation"
+
- name: version_burn_in_gen
commands:
- command: manifest.load
@@ -4679,17 +4846,18 @@ tasks:
- func: "do setup"
- func: "run tests"
-- <<: *task_template
- name: change_streams_v1_resume_token_passthrough
- tags: ["change_streams"]
- commands:
- - func: "do setup"
- - func: "run tests"
-
- <<: *gen_task_template
name: change_streams_multiversion_gen
tags: ["multiversion", "multiversion_passthrough"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ change_streams_last_continuous_new_new_old: last_continuous
+ change_streams_last_continuous_new_old_new: last_continuous
+ change_streams_last_continuous_old_new_new: last_continuous
+ change_streams_last_lts_new_new_old: last_lts
+ change_streams_last_lts_new_old_new: last_lts
+ change_streams_last_lts_old_new_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: change_streams
@@ -4699,6 +4867,10 @@ tasks:
name: change_streams_downgrade_gen
tags: ["multiversion_passthrough", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ change_streams_downgrade_last_continuous_new_old_old_new: last_continuous
+ change_streams_downgrade_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
run_no_feature_flag_tests: "true"
@@ -4721,19 +4893,14 @@ tasks:
- func: "do setup"
- func: "run tests"
-- <<: *task_template
- name: change_streams_v1_resume_token_sharded_collections_passthrough
- tags: ["change_streams"]
- depends_on:
- - name: change_streams
- commands:
- - func: "do setup"
- - func: "run tests"
-
- <<: *gen_task_template
name: change_streams_sharded_collections_multiversion_gen
tags: ["multiversion_passthrough", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new: last_continuous
+ change_streams_sharded_collections_passthrough_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: change_streams_sharded_collections_passthrough
@@ -4741,7 +4908,7 @@ tasks:
- <<: *gen_task_template
name: multiversion_future_git_tag_gen
- tags: ["multiversion", "no_version_combination", "multiversion_future_git_tag"]
+ tags: ["multiversion", "no_multiversion_generate_tasks", "multiversion_future_git_tag"]
commands:
- func: "generate resmoke tasks"
vars:
@@ -4749,7 +4916,7 @@ tasks:
- <<: *gen_task_template
name: multiversion_auth_future_git_tag_gen
- tags: ["auth", "multiversion", "no_version_combination", "multiversion_future_git_tag"]
+ tags: ["auth", "multiversion", "no_multiversion_generate_tasks", "multiversion_future_git_tag"]
commands:
- func: "generate resmoke tasks"
vars:
@@ -4827,14 +4994,14 @@ tasks:
- <<: *task_template
name: change_streams_multitenant_passthrough
- tags: ["change_streams"]
+ tags: [] # TODO SERVER-57866: Add the "change_streams" tag here.
commands:
- func: "do setup"
- func: "run tests"
- <<: *task_template
name: change_streams_multitenant_sharded_collections_passthrough
- tags: ["change_streams"]
+ tags: [] # TODO SERVER-57866: Add the "change_streams" tag here.
commands:
- func: "do setup"
- func: "run tests"
@@ -4998,6 +5165,8 @@ tasks:
args:
- "src/evergreen/external_auth_oidc_setup.sh"
- func: "run tests"
+ vars:
+ resmoke_jobs_max: ${external_auth_oidc_jobs_max|1}
- <<: *task_template
name: external_auth_windows
@@ -5044,7 +5213,7 @@ tasks:
- <<: *task_template
name: config_fuzzer_simulate_crash_concurrency_replication_gen
- tags: ["config_fuzzer", "large"]
+ tags: ["config_fuzzer", "large", "linux_only"]
commands:
- func: "generate resmoke tasks"
vars:
@@ -5066,15 +5235,28 @@ tasks:
--excludeWithAnyTags=does_not_support_config_fuzzer
use_large_distro: "true"
+- <<: *task_template
+ name: config_fuzzer_concurrency_simultaneous_replication_gen
+ tags: ["config_fuzzer", "large"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ suite: concurrency_simultaneous_replication
+ resmoke_args: >-
+ --fuzzMongodConfigs=normal
+ --excludeWithAnyTags=does_not_support_config_fuzzer
+ use_large_distro: "true"
+
- <<: *gen_task_template
name: config_fuzzer_concurrency_sharded_replication_gen
- tags: ["config_fuzzer", "large"]
+ tags: ["config_fuzzer", "large", "sharded"]
commands:
- func: "generate resmoke tasks"
vars:
suite: concurrency_sharded_replication
resmoke_args: >-
--fuzzMongodConfigs=normal
+ --fuzzMongosConfigs=normal
--excludeWithAnyTags=does_not_support_config_fuzzer
use_large_distro: "true"
@@ -5101,6 +5283,7 @@ tasks:
suite: concurrency_sharded_replication
resmoke_args: >-
--fuzzMongodConfigs=stress
+ --fuzzMongosConfigs=normal
--excludeWithAnyTags=does_not_support_config_fuzzer
use_large_distro: "true"
exec_timeout_secs: 14400 # 4 hours
@@ -5127,6 +5310,7 @@ tasks:
suite: sharding_jscore_passthrough
resmoke_args: >-
--fuzzMongodConfigs=normal
+ --fuzzMongosConfigs=normal
--excludeWithAnyTags=does_not_support_config_fuzzer
use_large_distro: "true"
@@ -5404,13 +5588,13 @@ tasks:
- <<: *gen_task_template
name: multiversion_auth_gen
- tags: ["auth", "multiversion", "no_version_combination", "future_git_tag_incompatible"]
+ tags: ["auth", "multiversion", "no_multiversion_generate_tasks", "future_git_tag_incompatible"]
commands:
- func: "generate resmoke tasks"
- <<: *gen_task_template
name: multiversion_gen
- tags: ["multiversion", "no_version_combination", "future_git_tag_incompatible"]
+ tags: ["multiversion", "no_multiversion_generate_tasks", "future_git_tag_incompatible"]
commands:
- func: "generate resmoke tasks"
@@ -5419,7 +5603,7 @@ tasks:
# build variants that enable this task.
- <<: *gen_task_template
name: feature_flag_multiversion_gen
- tags: ["multiversion", "no_version_combination"]
+ tags: ["multiversion", "no_multiversion_generate_tasks"]
commands:
- func: "generate resmoke tasks"
@@ -5485,17 +5669,20 @@ tasks:
commands:
- func: "generate resmoke tasks"
-- <<: *task_template
- name: sharded_collections_jscore_passthrough_with_catalog_shard
+- <<: *gen_task_template
+ name: sharded_collections_jscore_passthrough_with_config_shard_gen
tags: ["sharding", "jscore"]
commands:
- - func: "do setup"
- - func: "run tests"
+ - func: "generate resmoke tasks"
- <<: *gen_task_template
name: sharded_collections_jscore_multiversion_gen
tags: ["multiversion_passthrough", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ sharded_collections_jscore_passthrough_last_continuous_new_old_old_new: last_continuous
+ sharded_collections_jscore_passthrough_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: sharded_collections_jscore_passthrough
@@ -5508,17 +5695,20 @@ tasks:
- func: "do setup"
- func: "run tests"
-- <<: *task_template
- name: sharding_jscore_passthrough_with_catalog_shard
+- <<: *gen_task_template
+ name: sharding_jscore_passthrough_with_config_shard_gen
tags: ["sharding", "jscore", "common"]
commands:
- - func: "do setup"
- - func: "run tests"
+ - func: "generate resmoke tasks"
- <<: *gen_task_template
name: sharding_jscore_multiversion_gen
tags: ["multiversion_passthrough", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ sharding_jscore_passthrough_last_lts_new_old_old_new: last_lts
+ sharding_jscore_passthrough_last_continuous_new_old_old_new: last_continuous
- func: "generate resmoke tasks"
vars:
suite: sharding_jscore_passthrough
@@ -5647,7 +5837,14 @@ tasks:
use_large_distro: "true"
- <<: *task_template
- name: telemetry_passthrough
+ name: query_stats_passthrough
+ tags: []
+ commands:
+ - func: "do setup"
+ - func: "run tests"
+
+- <<: *task_template
+ name: query_stats_passthrough_writeonly
tags: []
commands:
- func: "do setup"
@@ -5812,6 +6009,14 @@ tasks:
name: concurrency_replication_multiversion_gen
tags: ["multiversion", "multiversion_passthrough"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ concurrency_replication_last_continuous_new_new_old: last_continuous
+ concurrency_replication_last_continuous_new_old_new: last_continuous
+ concurrency_replication_last_continuous_old_new_new: last_continuous
+ concurrency_replication_last_lts_new_new_old: last_lts
+ concurrency_replication_last_lts_new_old_new: last_lts
+ concurrency_replication_last_lts_old_new_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: concurrency_replication
@@ -5864,15 +6069,7 @@ tasks:
- <<: *gen_task_template
name: concurrency_replication_wiredtiger_cursor_sweeps_gen
- tags: ["concurrency", "repl"]
- commands:
- - func: "generate resmoke tasks"
- vars:
- resmoke_jobs_max: 1
-
-- <<: *gen_task_template
- name: concurrency_replication_wiredtiger_eviction_debug_gen
- tags: ["concurrency", "repl", "debug_only"]
+ tags: ["concurrency", "repl", "cursor_sweeps"]
commands:
- func: "generate resmoke tasks"
vars:
@@ -5888,7 +6085,7 @@ tasks:
resmoke_jobs_max: 1
- <<: *gen_task_template
- name: concurrency_sharded_with_catalog_shard_gen
+ name: concurrency_sharded_with_config_shard_gen
tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"]
commands:
- func: "generate resmoke tasks"
@@ -5900,6 +6097,10 @@ tasks:
name: concurrency_sharded_replication_multiversion_gen
tags: ["multiversion_passthrough", "sharded", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ concurrency_sharded_replication_last_continuous_new_old_old_new: last_continuous
+ concurrency_sharded_replication_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
suite: concurrency_sharded_replication
@@ -5915,7 +6116,7 @@ tasks:
resmoke_jobs_max: 1
- <<: *gen_task_template
- name: concurrency_sharded_with_balancer_and_catalog_shard_gen
+ name: concurrency_sharded_with_balancer_and_config_shard_gen
tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"]
commands:
- func: "generate resmoke tasks"
@@ -6098,16 +6299,7 @@ tasks:
- <<: *task_template
name: concurrency_simultaneous_replication_wiredtiger_cursor_sweeps
- tags: ["concurrency", "repl", "random_name"]
- commands:
- - func: "do setup"
- - func: "run tests"
- vars:
- resmoke_jobs_max: 1
-
-- <<: *task_template
- name: concurrency_simultaneous_replication_wiredtiger_eviction_debug
- tags: ["concurrency", "repl", "debug_only", "random_name"]
+ tags: ["concurrency", "repl", "cursor_sweeps"]
commands:
- func: "do setup"
- func: "run tests"
@@ -6208,6 +6400,10 @@ tasks:
name: replica_sets_multiversion_gen
tags: ["random_multiversion_ds", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ replica_sets_last_lts: last_lts
+ replica_sets_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
suite: replica_sets
@@ -6236,7 +6432,7 @@ tasks:
use_large_distro: "true"
- <<: *gen_task_template
- name: sharding_catalog_shard_gen
+ name: sharding_config_shard_gen
tags: ["sharding", "common"]
commands:
- func: "generate resmoke tasks"
@@ -6247,6 +6443,10 @@ tasks:
name: sharding_multiversion_gen
tags: ["random_multiversion_ds", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ sharding_last_continuous: last_continuous
+ sharding_last_lts: last_lts
- func: "generate resmoke tasks"
vars:
use_large_distro: "true"
@@ -6259,7 +6459,6 @@ tasks:
commands:
- func: "generate resmoke tasks"
vars:
- suite: sharding_continuous_config_stepdown
use_large_distro: "true"
# This is a subset of sharding_max_mirroring_opportunistic_secondary_targeting_ese_gen and
@@ -6297,14 +6496,6 @@ tasks:
vars:
use_large_distro: "true"
-- <<: *gen_task_template
- name: sharding_auth_catalog_shard_gen
- tags: []
- commands:
- - func: "generate resmoke tasks"
- vars:
- use_large_distro: "true"
-
- <<: *gen_task_template
name: sharding_auth_audit_gen
tags: ["sharding", "auth", "audit", "non_live_record", "no_debug_mode"]
@@ -6331,13 +6522,15 @@ tasks:
resmoke_jobs_max: 1
- <<: *gen_task_template
- name: sharding_last_lts_mongos_and_mixed_shards_gen
- tags: ["sharding", "common", "multiversion", "no_version_combination"]
+ name: sharding_mongos_and_mixed_shards_gen
+ tags: ["sharding", "common", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ sharding_last_lts_mongos_and_mixed_shards: last_lts
- func: "generate resmoke tasks"
vars:
use_large_distro: "true"
- multiversion_exclude_tags_version: last_lts
run_no_feature_flag_tests: "true"
- <<: *gen_task_template
@@ -6346,6 +6539,12 @@ tasks:
commands:
- func: "generate resmoke tasks"
+- <<: *gen_task_template
+ name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen
+ tags: ["sharding"]
+ commands:
+ - func: "generate resmoke tasks"
+
- <<: *gen_task_template
name: ssl_gen
tags: ["encrypt", "ssl", "patch_build"]
@@ -6370,6 +6569,24 @@ tasks:
vars:
suite: ssl_x509
+- <<: *gen_task_template
+ name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen
+ tags: ["jscore"]
+ commands:
+ - func: "generate resmoke tasks"
+
+- <<: *gen_task_template
+ name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen
+ tags: ["sharding"]
+ commands:
+ - func: "generate resmoke tasks"
+
+- <<: *gen_task_template
+ name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen
+ tags: ["sharding"]
+ commands:
+ - func: "generate resmoke tasks"
+
- <<: *task_template
name: jsCore_decimal
tags: ["jscore", "common", "decimal"]
@@ -6438,112 +6655,30 @@ tasks:
vars:
use_large_distro: "true"
-# Use explicit task definitions for retryable_writes_downgrade suites to avoid running
-# with all Repl multiversion combinations.
- <<: *gen_task_template
- name: retryable_writes_downgrade_last_continuous_gen
- tags: ["multiversion_passthrough", "multiversion", "no_version_combination"]
+ name: retryable_writes_downgrade_gen
+ tags: ["multiversion_passthrough", "multiversion"]
commands:
- - func: "generate resmoke tasks"
+ - func: "initialize multiversion tasks"
vars:
- multiversion_exclude_tags_version: last_continuous
- run_no_feature_flag_tests: "true"
-
-- <<: *gen_task_template
- name: retryable_writes_downgrade_last_lts_gen
- tags: ["multiversion_passthrough", "multiversion", "no_version_combination"]
- commands:
+ retryable_writes_downgrade_last_lts: last_lts
+ retryable_writes_downgrade_last_continuous: last_continuous
- func: "generate resmoke tasks"
vars:
- multiversion_exclude_tags_version: last_lts
run_no_feature_flag_tests: "true"
- <<: *gen_task_template
name: sharded_retryable_writes_downgrade_gen
tags: ["multiversion_passthrough", "multiversion"]
commands:
+ - func: "initialize multiversion tasks"
+ vars:
+ sharded_retryable_writes_downgrade_last_continuous_new_old_old_new: last_continuous
+ sharded_retryable_writes_downgrade_last_lts_new_old_old_new: last_lts
- func: "generate resmoke tasks"
vars:
run_no_feature_flag_tests: "true"
-- <<: *gen_task_template
- name: logical_session_cache_replication_default_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "repl"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_replication_100ms_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "repl"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_replication_1sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "one_sec", "repl"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_replication_10sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "repl"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_sharding_default_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_sharding_100ms_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_sharding_1sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "one_sec"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_sharding_10sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_standalone_default_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_standalone_100ms_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_standalone_1sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache", "one_sec"]
- commands:
- - func: "generate resmoke tasks"
-
-- <<: *gen_task_template
- name: logical_session_cache_standalone_10sec_refresh_jscore_passthrough_gen
- tags: ["logical_session_cache"]
- commands:
- - func: "generate resmoke tasks"
-
- <<: *gen_task_template
name: retryable_writes_jscore_stepdown_passthrough_gen
tags: ["retry"]
@@ -6801,7 +6936,6 @@ tasks:
- func: "run powercycle test"
timeout_secs: 1800 # 30 minute timeout for no output
-
- name: selinux_rhel8_org
tags: []
depends_on:
@@ -6836,6 +6970,8 @@ tasks:
- func: "run selinux tests"
vars:
distro: rhel80-selinux
+ test_list: jstests/selinux/*.js src/mongo/db/modules/enterprise/jstests/selinux/*.js
+
- name: selinux_rhel9_org
tags: []
depends_on:
@@ -6983,9 +7119,13 @@ tasks:
- <<: *task_template
name: resmoke_validation_tests
tags: []
- depends_on: []
+ depends_on:
+ - name: archive_dist_test
commands:
- - func: "do non-compile setup"
+ - func: "do setup"
+ vars:
+ extraction_change_dir: build/install/
+ move_outputs: "--move-output=build/install/dist-test/pretty_printer_tests.txt:build/"
- func: "run tests"
- name: test_packages
@@ -7063,8 +7203,8 @@ tasks:
content_type: ${content_type|application/gzip}
display_name: Source tarball
# We only need to upload the source tarball from one of the build variants
- # because it should be the same everywhere, so just use rhel70/windows.
- build_variants: [rhel70, windows]
+ # because it should be the same everywhere, so just use rhel80/windows.
+ build_variants: [rhel80, windows]
- command: s3.put
params:
optional: true
@@ -7173,7 +7313,7 @@ tasks:
- name: publish_packages
- run_on: rhel80-small
+ run_on: rhel8.7-small
tags: ["publish"]
# This should prevent this task from running in patch builds, where we
# don't want to publish packages.
@@ -7202,14 +7342,27 @@ tasks:
aws_secret_remote: ${repo_aws_secret}
- func: "set up notary client credentials"
- *f_expansions_write
+ - command: shell.exec
+ params:
+ shell: bash
+ script: |
+ set -oe
+ podman login --username ${release_tools_container_registry_username} --password ${release_tools_container_registry_password} ${release_tools_container_registry}
- command: subprocess.exec
params:
binary: bash
+ env:
+ AWS_ACCESS_KEY_ID: ${upload_lock_access_key_id}
+ AWS_SECRET_ACCESS_KEY: ${upload_lock_secret_access_key}
+ UPLOAD_LOCK_IMAGE: ${upload_lock_image}
+ UPLOAD_BUCKET: ${upload_lock_bucket}
+ AWS_REGION: ${upload_lock_region}
+ EVERGREEN_TASK_ID: ${task_id}
args:
- "./src/evergreen/packages_publish.sh"
- name: push
- run_on: rhel80-small
+ run_on: rhel8.7-small
tags: ["publish"]
patchable: false
depends_on:
@@ -7579,6 +7732,70 @@ tasks:
content_type: text/plain
remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}-signed.msi.md5
+ - command: subprocess.exec
+ params:
+ continue_on_err: true
+ binary: bash
+ env:
+ SERVER_TARBALL_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ SERVER_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ CRYPTD_TARBALL_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ CRYPTD_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ MONGOHOUSE_TARBALL_PATH: src/mh-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ MONGOHOUSE_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mh-${push_name}-${push_arch}-${suffix}.${ext|tgz}
+ SOURCE_TARBALL_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}
+ SOURCE_TARBALL_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}
+ DEBUG_SYMBOLS_TARBALL_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}
+ DEBUG_SYMBOLS_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}
+ SERVER_TARBALL_SIGNATURE_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig
+ SERVER_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig
+ CRYPTD_TARBALL_SIGNATURE_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig
+ CRYPTD_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig
+ SOURCE_TARBALL_SIGNATURE_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sig
+ SOURCE_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sig
+ DEBUG_SYMBOLS_TARBALL_SIGNATURE_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sig
+ DEBUG_SYMBOLS_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sig
+ MSI_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi
+ MSI_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi
+ SERVER_TARBALL_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1
+ SERVER_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1
+ CRYPTD_TARBALL_SHA1_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1
+ CRYPTD_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1
+ SOURCE_TARBALL_SHA1_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha1
+ SOURCE_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha1
+ DEBUG_SYMBOLS_TARBALL_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha1
+ DEBUG_SYMBOLS_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha1
+ MSI_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.sha1
+ MSI_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.sha1
+ SERVER_TARBALL_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256
+ SERVER_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256
+ CRYPTD_TARBALL_SHA256_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256
+ CRYPTD_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256
+ SOURCE_TARBALL_SHA256_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha256
+ SOURCE_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha256
+ DEBUG_SYMBOLS_TARBALL_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha256
+ DEBUG_SYMBOLS_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha256
+ MSI_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.sha256
+ MSI_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.sha256
+ SERVER_TARBALL_MD5_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5
+ SERVER_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5
+ CRYPTD_TARBALL_MD5_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5
+ CRYPTD_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5
+ SOURCE_TARBALL_MD5_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.md5
+ SOURCE_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.md5
+ DEBUG_SYMBOLS_TARBALL_MD5_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5
+ DEBUG_SYMBOLS_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5
+ MSI_MD5_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.md5
+ MSI_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.md5
+ AWS_ACCESS_KEY_ID: ${upload_lock_access_key_id}
+ AWS_SECRET_ACCESS_KEY: ${upload_lock_secret_access_key}
+ UPLOAD_LOCK_IMAGE: ${upload_lock_image}
+ UPLOAD_BUCKET: ${upload_lock_bucket}
+ AWS_REGION: ${upload_lock_region}
+ EVERGREEN_TASK_ID: ${task_id}
+ args:
+ - "./src/evergreen/run_upload_lock_push.sh"
+
- command: s3Copy.copy
params:
aws_key: ${aws_key}
@@ -7704,7 +7921,7 @@ tasks:
'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5', 'bucket': '${push_bucket}'}}
- name: crypt_push
- run_on: rhel80-small
+ run_on: rhel8.7-small
tags: ["publish_crypt"]
patchable: false
stepback: false
@@ -7741,13 +7958,18 @@ tasks:
aws_key_remote: ${repo_aws_key}
aws_secret_remote: ${repo_aws_secret}
- func: "f_expansions_write"
- - func: "set up notary client credentials"
+ # login to container registry
+ - command: shell.exec
+ params:
+ shell: bash
+ script: |
+ set -oe
+ podman login --username ${release_tools_container_registry_username} --password ${release_tools_container_registry_password} ${release_tools_container_registry}
- command: subprocess.exec
- type: test
params:
binary: bash
args:
- - "./src/evergreen/notary_client_crypt_run.sh"
+ - "./src/evergreen/garasign_gpg_crypt_sign.sh"
# Put the crypt tarball/zipfile
- command: s3.put
params:
@@ -7853,33 +8075,58 @@ tasks:
resmoke_jobs_max: 1
- <<: *task_template
- name: cqf
+ name: vector_search
tags: []
commands:
- func: "do setup"
- func: "run tests"
+ vars:
+ resmoke_jobs_max: 1
- <<: *task_template
- name: cqf_disabled_pipeline_opt
- tags: []
+ name: cqf
+ tags: [cqf]
commands:
- func: "do setup"
- func: "run tests"
- <<: *task_template
- name: cqf_passthrough
- tags: []
+ name: cqf_disabled_pipeline_opt
+ tags: [cqf]
commands:
- func: "do setup"
- func: "run tests"
- <<: *task_template
name: cqf_parallel
- tags: []
+ tags: [cqf]
+ commands:
+ - func: "do setup"
+ - func: "run tests"
+
+- <<: *task_template
+ name: cqf_experimental_jscore_passthrough
+ tags: [cqf]
+ commands:
+ - func: "do setup"
+ - func: "run tests"
+
+- <<: *task_template
+ name: cqf_experimental_aggregation_passthrough
+ tags: [cqf]
commands:
- func: "do setup"
- func: "run tests"
+- <<: *gen_task_template
+ name: cqf_experimental_no_passthrough_gen
+ tags: ["cqf"]
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ suite: cqf_experimental_no_passthrough
+ use_large_distro: "true"
+
- <<: *task_template
name: streams
tags: []
@@ -7938,6 +8185,28 @@ tasks:
JIRA_AUTH_CONSUMER_KEY: ${jira_auth_consumer_key}
JIRA_AUTH_KEY_CERT: ${jira_auth_key_cert}
+- name: lint_large_files_check
+ tags: []
+ exec_timeout_secs: 600 # 10 minute timeout
+ commands:
+ - command: manifest.load
+ - func: "git get project and add git tag"
+ - *f_expansions_write
+ - *kill_processes
+ - *cleanup_environment
+ - func: "set up venv"
+ - func: "upload pip requirements"
+ - func: "configure evergreen api credentials"
+ - command: subprocess.exec
+ type: test
+ params:
+ binary: bash
+ args:
+ - "./src/evergreen/run_python_script.sh"
+ - "buildscripts/large_file_check.py"
+ - "--exclude"
+ - "src/third_party/*"
+
- name: check_for_todos
tags: []
exec_timeout_secs: 600 # 10 minute timeout
@@ -8055,7 +8324,7 @@ tasks:
- <<: *task_template
name: query_golden_cqf
- tags: []
+ tags: [cqf]
commands:
- func: "do setup"
- func: "run tests"
@@ -8119,6 +8388,11 @@ task_groups:
tasks:
- libdeps_graph_linting
+- <<: *compile_task_group_template
+ name: iwyu_self_test_TG
+ tasks:
+ - iwyu_self_test
+
- <<: *compile_task_group_template
name: compile_ninja_TG
tasks:
@@ -8190,6 +8464,28 @@ task_groups:
- <<: *compile_task_group_template
name: compile_archive_and_run_libfuzzertests_TG
+ setup_group_can_fail_task: false
+ setup_group:
+ - command: manifest.load
+ - func: "git get project and add git tag"
+ - func: "set task expansion macros"
+ - func: "f_expansions_write"
+ - func: "kill processes"
+ - func: "cleanup environment"
+ # The python virtual environment is installed in ${workdir}, which is created in
+ # "set up venv".
+ - func: "set up venv"
+ - func: "upload pip requirements"
+ - func: "f_expansions_write"
+ - func: "configure evergreen api credentials"
+ - func: "get buildnumber"
+ - func: "f_expansions_write"
+ - func: "set up credentials"
+ - func: "use WiredTiger develop" # noop if ${use_wt_develop} is not "true"
+ - func: "set up win mount script"
+ - func: "generate compile expansions"
+ - func: "f_expansions_write"
+ - func: "fetch corpus"
tasks:
- compile_and_archive_libfuzzertests
- fetch_and_run_libfuzzertests
@@ -8223,6 +8519,29 @@ task_groups:
- compile_all_but_not_unittests
- package
+# SERVER-76006
+# This is a compile stream meant for non-cached and/or underpowered systems.
+# It joins most of the compile tasks together on a single host, spread out
+# across different tasks.
+- <<: *compile_task_group_template
+ name: small_compile_test_and_package_serial_no_unittests_TG
+ tasks:
+ - compile_dist_test_half
+ - compile_dist_test
+ - archive_dist_test
+ - archive_dist_test_debug
+ - compile_integration_test
+ - integration_tests_standalone
+ - integration_tests_standalone_audit
+ - integration_tests_replset
+ - integration_tests_replset_ssl_auth
+ - integration_tests_sharded
+ - compile_dbtest
+ - run_dbtest
+ - archive_dbtest
+ - compile_all_but_not_unittests
+ - package
+
- <<: *compile_task_group_template
name: compile_test_benchmark_and_package_serial_TG
tasks:
diff --git a/etc/evergreen_yml_components/project_and_distro_settings.yml b/etc/evergreen_yml_components/project_and_distro_settings.yml
deleted file mode 100644
index 83e3d74b2bfbc..0000000000000
--- a/etc/evergreen_yml_components/project_and_distro_settings.yml
+++ /dev/null
@@ -1,76 +0,0 @@
-###
-# Definitions for project and distro settings associated with this Evergreen project.
-###
-
-
-## Aliases.
-patch_aliases:
- - alias: "embedded"
- variant: ".*"
- task: "embedded_.*"
- variant_tags: []
- task_tags: []
- - alias: "hourly"
- variant: "^(enterprise-windows-all-feature-flags-required|linux-64-debug|ubuntu1804-debug-aubsan-lite|enterprise-rhel-70-64-bit|ubuntu1604-debug|macos-debug|windows-debug )$"
- task: ".*"
- variant_tags: []
- task_tags: []
- - alias: "query"
- variant: "^(.*query-patch-only|enterprise-ubuntu-dynamic-1804-clang-tidy-required)$"
- task: ".*"
- variant_tags: []
- task_tags: []
- - alias: "required"
- variant: "^(.*-required$|linux-64-debug|ubuntu1804-debug-aubsan-lite)$"
- task: ".*"
- variant_tags: []
- task_tags: []
- - alias: "security"
- variant: "^(.*security-patch-only)$"
- task: ".*"
- variant_tags: []
- task_tags: []
-
-commit_queue_aliases:
- - variant: "commit-queue"
- task: "^(run_.*|compile_.*|lint_.*|validate_commit_message|test_api_version_compatibility|jsCore)$"
- variant_tags: []
- task_tags: []
- - variant: "^(enterprise-ubuntu-dynamic-1804-clang-tidy-required|linux-x86-dynamic-compile-required)$"
- task: "clang_tidy"
- variant_tags: []
- task_tags: []
-
-github_pr_aliases:
-- variant: "^(.*query-patch-only|enterprise-ubuntu-dynamic-1804-clang-tidy-required)$"
- task: ".*"
- variant_tags: []
- task_tags: []
-
-git_tag_aliases: []
-
-github_checks_aliases: []
-
-## Virtual Workstation Commands.
-workstation_config:
- git_clone: no
- setup_commands:
- - command: "git clone git@github.com:mongodb/server-workflow-tool.git"
- directory: ""
- - command: "bash virtual_workstation_setup.sh"
- directory: "server-workflow-tool"
-
-
-## Task Sync.
-task_sync:
- config_enabled: true
- patch_enabled: true
-
-
-## Build Baron.
-build_baron_settings:
- ticket_create_project: BF
- ticket_search_projects:
- - BF
- - WT
- - SERVER
diff --git a/etc/evergreen_yml_components/variants/atlas.yml b/etc/evergreen_yml_components/variants/atlas.yml
index bb1b3b05cc1c2..04f51c9bbb441 100644
--- a/etc/evergreen_yml_components/variants/atlas.yml
+++ b/etc/evergreen_yml_components/variants/atlas.yml
@@ -18,10 +18,12 @@ buildvariants:
push_bucket: downloads.10gen.com
push_name: linux
push_arch: x86_64-enterprise-rhel70
+ test_flags: --excludeWithAnyTags=requires_latch_analyzer
compile_flags: >-
--ssl MONGO_DISTMOD=rhel70
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -54,7 +56,6 @@ buildvariants:
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .ocsp
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -72,7 +73,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2004-package
- # TODO: BF-24515 restore when BF is resolved
- #- name: selinux_rhel7_enterprise
+ - name: vector_search
+ - name: selinux_rhel7_enterprise
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
diff --git a/etc/evergreen_yml_components/variants/classic_engine.yml b/etc/evergreen_yml_components/variants/classic_engine.yml
new file mode 100644
index 0000000000000..437c282c4ef2d
--- /dev/null
+++ b/etc/evergreen_yml_components/variants/classic_engine.yml
@@ -0,0 +1,327 @@
+# Build variants for testing the classic engine.
+
+variables:
+- &linux_x86_dynamic_compile_variant_dependency
+ depends_on:
+ - name: archive_dist_test_debug
+ variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile
+ - name: version_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+ # Without this, we add extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
+
+- &linux_x86_generic_expansions
+ multiversion_platform: rhel80
+ multiversion_edition: enterprise
+ repo_edition: enterprise
+ large_distro_name: rhel80-medium
+ num_scons_link_jobs_available: 0.99
+ compile_variant: *linux_x86_dynamic_compile_variant_name
+
+- &enterprise-rhel-80-64-bit-dynamic-expansions
+ <<: *linux_x86_generic_expansions
+ scons_cache_scope: shared
+ scons_cache_mode: all
+ has_packages: false
+ jstestfuzz_num_generated_files: 40
+ jstestfuzz_concurrent_num_files: 10
+ target_resmoke_time: 10
+ max_sub_suites: 5
+ idle_timeout_factor: 1.5
+ exec_timeout_factor: 1.5
+ large_distro_name: rhel80-medium
+ burn_in_tag_buildvariants: >-
+ enterprise-rhel-80-64-bit-inmem
+ enterprise-rhel-80-64-bit-multiversion
+ burn_in_tag_compile_task_dependency: archive_dist_test_debug
+
+# If you add anything to san_options, make sure the appropriate changes are
+# also made to SConstruct.
+# The same applies to the san_options in compile_static_analysis.yml.
+- aubsan_options: &aubsan_options
+ >-
+ UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+ LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1"
+ ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+
+buildvariants:
+- &enterprise-rhel-80-64-bit-dynamic-classic-engine
+ <<: *linux_x86_dynamic_compile_variant_dependency
+ name: enterprise-rhel-80-64-bit-dynamic-classic-engine
+ display_name: "Shared Library Enterprise RHEL 8.0 (Classic Engine)"
+ cron: "0 0 * * 0" # once a week (Sunday midnight UTC)
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-small
+ stepback: false
+ expansions:
+ <<: *enterprise-rhel-80-64-bit-dynamic-expansions
+ jstestfuzz_num_generated_files: 40
+ jstestfuzz_concurrent_num_files: 10
+ target_resmoke_time: 10
+ max_sub_suites: 5
+ test_flags: >-
+ --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
+ large_distro_name: rhel80-medium
+ burn_in_tag_buildvariants: >-
+ enterprise-rhel-80-64-bit-inmem
+ enterprise-rhel-80-64-bit-multiversion
+ burn_in_tag_compile_task_dependency: archive_dist_test_debug
+ depends_on:
+ - name: archive_dist_test_debug
+ variant: *linux_x86_dynamic_compile_variant_name
+ - name: version_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+    # Without this, we would be adding extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
+ tasks:
+ - name: .aggfuzzer !.sbe_only
+ - name: .aggregation !.sbe_only
+ - name: .auth
+ - name: .causally_consistent !.sharding
+ - name: .change_stream_fuzzer
+ - name: .change_streams
+ - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode
+ - name: .concurrency .large !.ubsan !.no_txns !.compute_mode
+ distros:
+ - rhel80-medium
+ - name: .encrypt
+ - name: .jscore .common !jsCore !.sbe_only
+ - name: .jstestfuzz !.flow_control
+ - name: .misc_js
+ - name: .multi_shard
+ - name: .query_fuzzer
+ - name: .random_multiversion_ds
+ - name: .read_only
+ - name: .read_write_concern !.large
+ - name: .read_write_concern .large
+ distros:
+ - rhel80-medium
+ - name: .replica_sets !.encrypt !.auth
+ distros:
+ - rhel80-xlarge
+ - name: .rollbackfuzzer
+ - name: .sharding .common
+ - name: .sharding .jscore !.wo_snapshot !.multi_stmt
+ - name: .sharding .txns
+ - name: .serverless
+ distros:
+ - rhel80-xlarge
+ - name: .updatefuzzer
+ - name: aggregation_repeat_queries
+ - name: audit
+ - name: burn_in_tags_gen
+ depends_on:
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ omit_generated_tasks: true
+ - name: archive_dist_test_debug
+ variant: *linux_x86_dynamic_compile_variant_name
+ - name: burn_in_tests_gen
+ depends_on:
+ - name: version_burn_in_gen
+ variant: generate-tasks-for-version
+ omit_generated_tasks: true
+ - name: archive_dist_test_debug
+ variant: *linux_x86_dynamic_compile_variant_name
+ - name: check_feature_flag_tags
+ - name: check_for_todos
+ - name: disk_wiredtiger
+ - name: initial_sync_fuzzer_gen
+ - name: jsCore
+ distros:
+ - rhel80-xlarge
+ - name: jsCore_min_batch_repeat_queries_ese_gsm
+ - name: jsCore_txns_large_txns_format
+ - name: json_schema
+ - name: lint_fuzzer_sanity_patch
+ - name: mqlrun
+ - name: multi_stmt_txn_jscore_passthrough_with_migration_gen
+ - name: multiversion_gen
+ - name: .multiversion_sanity_check
+ - name: replica_sets_api_version_jscore_passthrough_gen
+ - name: replica_sets_reconfig_jscore_passthrough_gen
+ - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen
+ distros:
+ - rhel80-xlarge
+ - name: replica_sets_reconfig_kill_primary_jscore_passthrough_gen
+ distros:
+ - rhel80-xlarge
+ - name: retryable_writes_jscore_passthrough_gen
+ - name: retryable_writes_jscore_stepdown_passthrough_gen
+ - name: sasl
+ - name: search
+ - name: search_auth
+ - name: search_pinned_connections_auth
+ - name: search_ssl
+ - name: secondary_reads_passthrough_gen
+ - name: session_jscore_passthrough
+ - name: sharding_api_version_jscore_passthrough_gen
+ - name: test_api_version_compatibility
+ - name: unittest_shell_hang_analyzer_gen
+ - name: vector_search
+
+- name: &rhel80-debug-asan-classic-engine rhel80-debug-asan-classic-engine
+ display_name: "* ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
+ cron: "0 0 * * 2" # once a week (Tuesday midnight UTC)
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-build
+ stepback: false
+ expansions:
+ additional_package_targets: >-
+ archive-mongocryptd
+ archive-mongocryptd-debug
+ lang_environment: LANG=C
+ san_options: *aubsan_options
+ compile_flags: >-
+ --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
+ --dbg=on
+ --opt=on
+ --allocator=system
+ --sanitize=address
+ --ssl
+ --ocsp-stapling=off
+ --enable-free-mon=on
+ -j$(grep -c ^processor /proc/cpuinfo)
+ compile_variant: *rhel80-debug-asan-classic-engine
+ test_flags: >-
+ --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
+ --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling,requires_increased_memlock_limits
+ multiversion_platform: rhel80
+ multiversion_edition: enterprise
+ resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build.
+ hang_analyzer_dump_core: false
+ scons_cache_scope: shared
+    exec_timeout_secs: 14400 # 4 hour timeout
+ separate_debug: off
+ large_distro_name: rhel80-build
+ tasks:
+ - name: compile_test_benchmark_and_package_serial_TG
+ - name: .aggregation !.sbe_only
+ - name: .auth
+ - name: audit
+ - name: .benchmarks
+ - name: .causally_consistent !.wo_snapshot
+ - name: .change_streams
+ - name: .misc_js
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode
+ - name: .encrypt
+ - name: free_monitoring
+ - name: external_auth
+ - name: external_auth_aws
+ - name: external_auth_oidc
+ - name: initial_sync_fuzzer_gen
+ - name: compile_integration_and_test_parallel_stream_TG
+ distros:
+ - rhel80-large
+ - name: .jscore .common !.sbe_only
+ - name: jsCore_min_batch_repeat_queries_ese_gsm
+ - name: jsCore_txns_large_txns_format
+ - name: json_schema
+ - name: .logical_session_cache
+ - name: .multi_shard .common
+ - name: .query_fuzzer
+ - name: .read_write_concern
+ - name: replica_sets_large_txns_format_jscore_passthrough
+ - name: .replica_sets !.multi_oplog
+ - name: .replica_sets .encrypt
+ - name: .resharding_fuzzer
+ - name: .retry
+ - name: .read_only
+ - name: .rollbackfuzzer
+ - name: .updatefuzzer
+ - name: sasl
+ - name: secondary_reads_passthrough_gen
+ - name: session_jscore_passthrough
+ - name: .sharding .jscore !.wo_snapshot
+ - name: .sharding .common !.csrs !.jstestfuzz
+ - name: .watchdog
+ - name: .stitch
+ - name: .serverless
+ - name: unittest_shell_hang_analyzer_gen
+ - name: .updatefuzzer
+ - name: server_discovery_and_monitoring_json_test_TG
+ - name: server_selection_json_test_TG
+ - name: generate_buildid_to_debug_symbols_mapping
+
+- name: &rhel80-debug-ubsan-classic-engine rhel80-debug-ubsan-classic-engine
+ display_name: "* UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
+ cron: "0 0 * * 4" # once a week (Thursday midnight UTC)
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-build
+ stepback: false
+ expansions:
+ additional_package_targets: >-
+ archive-mongocryptd
+ archive-mongocryptd-debug
+ lang_environment: LANG=C
+ san_options: *aubsan_options
+ compile_variant: *rhel80-debug-ubsan-classic-engine
+ compile_flags: >-
+ --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
+ --dbg=on
+ --opt=on
+ --sanitize=undefined
+ --ssl
+ --ocsp-stapling=off
+ --enable-free-mon=on
+ -j$(grep -c ^processor /proc/cpuinfo)
+ test_flags: >-
+ --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
+ --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
+ multiversion_platform: rhel80
+ multiversion_edition: enterprise
+ resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build.
+ scons_cache_scope: shared
+ separate_debug: off
+ large_distro_name: rhel80-build
+ tasks:
+ - name: compile_test_benchmark_and_package_serial_TG
+ - name: .aggregation !.sbe_only
+ - name: .auth
+ - name: audit
+ - name: .benchmarks
+ - name: .causally_consistent !.wo_snapshot
+ - name: .change_streams
+ - name: .misc_js
+ - name: .concurrency !.no_txns !.repl !.kill_terminate !.compute_mode
+ - name: disk_wiredtiger
+ - name: .encrypt
+ - name: free_monitoring
+ - name: initial_sync_fuzzer_gen
+ - name: compile_integration_and_test_parallel_stream_TG
+ distros:
+ - rhel80-large
+ - name: .jscore .common !.sbe_only
+ - name: jsCore_min_batch_repeat_queries_ese_gsm
+ - name: jsCore_txns_large_txns_format
+ - name: json_schema
+ - name: .logical_session_cache .one_sec
+ - name: .multi_shard .common
+ - name: .read_write_concern
+ - name: replica_sets_large_txns_format_jscore_passthrough
+ - name: .replica_sets !.multi_oplog
+ - name: .replica_sets .encrypt
+ - name: .resharding_fuzzer
+ - name: .retry
+ - name: .rollbackfuzzer
+ - name: .read_only
+ - name: sasl
+ - name: secondary_reads_passthrough_gen
+ - name: session_jscore_passthrough
+ - name: .sharding .jscore !.wo_snapshot
+ - name: .sharding .common !.csrs !.jstestfuzz
+ - name: .stitch
+ - name: .updatefuzzer
+ - name: .serverless
+ - name: watchdog_wiredtiger
+ - name: server_discovery_and_monitoring_json_test_TG
+ - name: server_selection_json_test_TG
+ - name: generate_buildid_to_debug_symbols_mapping
diff --git a/etc/evergreen_yml_components/variants/compile_static_analysis.yml b/etc/evergreen_yml_components/variants/compile_static_analysis.yml
index cd82501355638..36c4fb2ca77ab 100644
--- a/etc/evergreen_yml_components/variants/compile_static_analysis.yml
+++ b/etc/evergreen_yml_components/variants/compile_static_analysis.yml
@@ -57,7 +57,7 @@ variables:
# If you add anything to san_options, make sure the appropriate changes are
# also made to SConstruct.
-# and also to the san_options in evergreen.yml
+# The same applies to the san_options in evergreen.yml and sanitizer.yml.
- aubsan_options: &aubsan_options
>-
UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
@@ -67,12 +67,11 @@ variables:
buildvariants:
- <<: *linux-x86-dynamic-compile-params
- name: &linux-x86-dynamic-compile-required linux-x86-dynamic-compile-required
- display_name: "! Linux x86 Shared Library Compile & Static Analysis"
+ name: &linux-x86-dynamic-compile linux-x86-dynamic-compile
+ display_name: "* Linux x86 Shared Library"
expansions:
<<: *linux-x86-dynamic-compile-expansions
- clang_tidy_toolchain: v4
- compile_variant: *linux-x86-dynamic-compile-required
+ compile_variant: *linux-x86-dynamic-compile
tasks:
- name: compile_ninja_quick_TG
- name: compile_test_and_package_parallel_unittest_stream_TG
@@ -80,10 +79,7 @@ buildvariants:
- name: compile_test_and_package_parallel_dbtest_stream_TG
- name: compile_integration_and_test_parallel_stream_TG
- name: generate_buildid_to_debug_symbols_mapping
- - name: .lint
- - name: clang_tidy_TG
- distros:
- - rhel80-xxlarge
+ - name: run_pretty_printer_tests
- name: server_discovery_and_monitoring_json_test_TG
distros:
- rhel80-large
@@ -91,6 +87,21 @@ buildvariants:
distros:
- rhel80-large
+- <<: *generic_linux_compile_params
+ name: &linux-x86-dynamic-compile-future-tag-multiversion linux-x86-dynamic-compile-future-tag-multiversion
+ display_name: "Linux x86 Shared Library Compile (future git tag multiversion)"
+ modules:
+ - enterprise
+ expansions:
+ <<: *linux-x86-dynamic-compile-expansions
+ bv_future_git_tag: r100.0.0-9999
+ compile_variant: *linux-x86-dynamic-compile-future-tag-multiversion
+ depends_on:
+ - name: version_expansions_gen
+ variant: enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen
+ tasks:
+ - name: compile_test_and_package_serial_TG
+
- <<: *generic_linux_compile_params
name: &linux-x86-dynamic-debug-compile-required linux-x86-dynamic-debug-compile-required # TODO: replace with Sanitizer.
display_name: "! Linux x86 Shared Library DEBUG Compile"
@@ -156,15 +167,15 @@ buildvariants:
- name: .stitch
- <<: *linux-x86-dynamic-compile-params
- name: &linux-crypt-compile-required linux-crypt-compile-required
- display_name: "! Linux x86 Crypt Enterprise Compile"
+ name: &linux-crypt-compile linux-crypt-compile
+ display_name: "* Linux x86 Crypt Enterprise Compile"
expansions:
<<: *linux-x86-dynamic-compile-expansions
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
- compile_variant: *linux-crypt-compile-required
+ compile_variant: *linux-crypt-compile
tasks:
- name: .crypt
- name: crypt_build_debug_and_test
@@ -209,22 +220,31 @@ buildvariants:
- <<: *linux-arm64-dynamic-compile-params
name: &amazon-linux2-arm64-compile amazon-linux2-arm64-compile
- display_name: "* Amazon Linux 2 arm64 Shared Library Compile"
+ display_name: "! Amazon Linux 2 arm64 Shared Library Compile & Static Analysis"
expansions:
<<: *linux-arm64-dynamic-compile-expansions
+ clang_tidy_toolchain: v4
compile_variant: *amazon-linux2-arm64-compile
tasks:
+ - name: clang_tidy_TG
+ distros:
+ - amazon2-arm64-xlarge
+ - name: compile_ninja_quick_TG
- name: compile_test_and_package_parallel_unittest_stream_TG
- name: compile_test_and_package_parallel_core_stream_TG
- name: compile_test_and_package_parallel_dbtest_stream_TG
- name: compile_integration_and_test_parallel_stream_TG
- name: generate_buildid_to_debug_symbols_mapping
+ - name: iwyu_self_test_TG
+ - name: .lint
+ - name: resmoke_validation_tests
- name: server_discovery_and_monitoring_json_test_TG
- name: server_selection_json_test_TG
+ - name: run_pretty_printer_tests
- <<: *linux-arm64-dynamic-compile-params
name: &amazon-linux2-arm64-crypt-compile amazon-linux2-arm64-crypt-compile
- display_name: "* Amazon Linux 2 arm64 Crypt Compile"
+ display_name: "! Amazon Linux 2 arm64 Crypt Compile"
expansions:
<<: *linux-arm64-dynamic-compile-expansions
compile_variant: *amazon-linux2-arm64-crypt-compile
diff --git a/etc/evergreen_yml_components/variants/config_shard.yml b/etc/evergreen_yml_components/variants/config_shard.yml
new file mode 100644
index 0000000000000..17d423b1437e1
--- /dev/null
+++ b/etc/evergreen_yml_components/variants/config_shard.yml
@@ -0,0 +1,88 @@
+# This build variant is used to test suites that use the sharded cluster fixture in config shard mode.
+# TODO (SERVER-75884): Remove this once we switch to config shard as the default.
+
+# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
+variables:
+- &linux_x86_dynamic_compile_variant_dependency
+ depends_on:
+ - name: archive_dist_test_debug
+ variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile
+ - name: version_gen
+ variant: generate-tasks-for-version
+ # This is added because of EVG-18211.
+    # Without this, we would be adding extra dependencies on Evergreen, which causes strain.
+ omit_generated_tasks: true
+
+# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
+- &linux_x86_generic_expansions
+ multiversion_platform: rhel80
+ multiversion_edition: enterprise
+ repo_edition: enterprise
+ large_distro_name: rhel80-medium
+ num_scons_link_jobs_available: 0.99
+ compile_variant: *linux_x86_dynamic_compile_variant_name
+
+# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE.
+- &enterprise-rhel-80-64-bit-dynamic-expansions
+ <<: *linux_x86_generic_expansions
+ scons_cache_scope: shared
+ scons_cache_mode: all
+ has_packages: false
+ jstestfuzz_num_generated_files: 40
+ jstestfuzz_concurrent_num_files: 10
+ target_resmoke_time: 10
+ max_sub_suites: 5
+ idle_timeout_factor: 1.5
+ exec_timeout_factor: 1.5
+ large_distro_name: rhel80-medium
+
+buildvariants:
+- &enterprise-rhel-80-64-bit-dynamic-config-shard
+ <<: *linux_x86_dynamic_compile_variant_dependency
+ name: enterprise-rhel-80-64-bit-dynamic-config-shard
+ display_name: "* Shared Library Enterprise RHEL 8.0 (Config Shard)"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-small
+ stepback: false
+ expansions: &enterprise-rhel-80-64-bit-dynamic-config-shard-expansions
+ <<: *enterprise-rhel-80-64-bit-dynamic-expansions
+ test_flags: >-
+ --configShard=any
+ --excludeWithAnyTags=config_shard_incompatible
+ tasks:
+ - name: aggregation_mongos_passthrough
+ - name: aggregation_one_shard_sharded_collections
+ - name: aggregation_sharded_collections_causally_consistent_passthrough
+ - name: aggregation_sharded_collections_passthrough
+ - name: causally_consistent_hedged_reads_jscore_passthrough_gen
+ - name: causally_consistent_jscore_passthrough_auth_gen
+ - name: causally_consistent_jscore_passthrough_gen
+ - name: change_streams
+ - name: change_streams_mongos_sessions_passthrough
+ - name: change_streams_multi_stmt_txn_mongos_passthrough
+ - name: change_streams_multi_stmt_txn_sharded_collections_passthrough
+ - name: change_streams_per_shard_cursor_passthrough
+ - name: fle2_sharding_high_cardinality
+ - name: fle2_sharding
+ - name: jstestfuzz_sharded_causal_consistency_gen
+ - name: jstestfuzz_sharded_continuous_stepdown_gen
+ - name: jstestfuzz_sharded_gen
+ - name: jstestfuzz_sharded_session_gen
+ - name: sharded_causally_consistent_jscore_passthrough_gen
+ - name: sharded_causally_consistent_read_concern_snapshot_passthrough_gen
+ - name: sharding_auth_gen
+ # Explicitly include instead of using tags to avoid pulling in replica_sets_multiversion_gen. This
+ # variant will be removed when config shards become the default, so this is only temporary.
+ - name: sharding_multiversion_gen
+ - name: .sharding .txns
+ # Skip csrs stepdown suite because most tests can't handle the first shard stepping down.
+ - name: .sharding .common !.csrs
+ - name: .sharding .jscore !.wo_snapshot !.multi_stmt
+ - name: .concurrency .sharded !.large
+ - name: .concurrency .sharded .large
+ distros:
+ - rhel80-medium
+ - name: .multi_shard
diff --git a/etc/evergreen_yml_components/variants/ibm.yml b/etc/evergreen_yml_components/variants/ibm.yml
index f21592344f2ae..02546b982ef2b 100644
--- a/etc/evergreen_yml_components/variants/ibm.yml
+++ b/etc/evergreen_yml_components/variants/ibm.yml
@@ -14,12 +14,14 @@ buildvariants:
additional_package_targets: >-
archive-mongocryptd
archive-mongocryptd-debug
+ test_flags: --excludeWithAnyTags=requires_latch_analyzer
# We need to compensate for SMT8 setting the cpu count very high and lower the amount of parallelism down
compile_flags: >-
--ssl
MONGO_DISTMOD=rhel81
-j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
@@ -74,6 +76,7 @@ buildvariants:
MONGO_DISTMOD=rhel81
-j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
tasks:
- name: compile_test_and_package_serial_TG
distros:
@@ -98,7 +101,10 @@ buildvariants:
-j3
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
--linker=gold
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: >-
+ --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits
+ --excludeWithAnyTags=requires_latch_analyzer,incompatible_with_s390x
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
@@ -118,10 +124,7 @@ buildvariants:
multiversion_edition: enterprise
compile_variant: enterprise-rhel-72-s390x
tasks:
- - name: compile_test_and_package_serial_no_unittests_TG
- distros:
- - rhel72-zseries-build
- - name: compile_integration_and_test_no_audit_parallel_stream_TG
+ - name: small_compile_test_and_package_serial_no_unittests_TG
distros:
- rhel72-zseries-build
- name: .aggregation .common
@@ -142,7 +145,8 @@ buildvariants:
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: enterprise-rhel-83-s390x
+- &enterprise-rhel-83-s390x-template
+ name: enterprise-rhel-83-s390x
display_name: Enterprise RHEL 8.3 s390x
modules:
- enterprise
@@ -150,17 +154,19 @@ buildvariants:
- rhel83-zseries-small
cron: "0 4 * * 0"
stepback: false
- expansions:
+ expansions: &enterprise-rhel-83-s390x-expansions-template
additional_package_targets: >-
archive-mongocryptd
archive-mongocryptd-debug
release_buid: true
+ test_flags: --excludeWithAnyTags=incompatible_with_s390x,requires_latch_analyzer
compile_flags: >-
--ssl
MONGO_DISTMOD=rhel83
-j3
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
--linker=gold
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -179,10 +185,7 @@ buildvariants:
multiversion_edition: enterprise
compile_variant: enterprise-rhel-83-s390x
tasks:
- - name: compile_test_and_package_serial_TG
- distros:
- - rhel83-zseries-large
- - name: compile_integration_and_test_no_audit_parallel_stream_TG
+ - name: small_compile_test_and_package_serial_no_unittests_TG
distros:
- rhel83-zseries-large
- name: .aggregation .common
@@ -202,3 +205,21 @@ buildvariants:
- name: .publish_crypt
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
+
+- <<: *enterprise-rhel-83-s390x-template
+ name: enterprise-rhel-83-s390x-dynamic
+ display_name: Enterprise RHEL 8.3 s390x Shared
+ expansions:
+ <<: *enterprise-rhel-83-s390x-expansions-template
+ compile_flags: >-
+ --link-model=dynamic
+ --ssl
+ MONGO_DISTMOD=rhel83
+ -j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc)
+ --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --linker=gold
+ --use-diagnostic-latches=off
+ tasks:
+ - name: compile_test_and_package_serial_TG
+ distros:
+ - rhel83-zseries-large
diff --git a/etc/evergreen_yml_components/variants/in_memory.yml b/etc/evergreen_yml_components/variants/in_memory.yml
index 547676d8cee2d..ac56df2ee645d 100644
--- a/etc/evergreen_yml_components/variants/in_memory.yml
+++ b/etc/evergreen_yml_components/variants/in_memory.yml
@@ -40,14 +40,13 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate !.requires_wt
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.requires_wt
distros:
- rhel80-medium # Some workloads require a lot of memory, use a bigger machine for this suite.
- name: initial_sync_fuzzer_gen
- name: .jscore .common !.decimal
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz !.initsync
- - name: .logical_session_cache
- name: .multi_shard .common
- name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- name: .read_write_concern !.durable_history
diff --git a/etc/evergreen_yml_components/variants/misc_release.yml b/etc/evergreen_yml_components/variants/misc_release.yml
index d4f7e3863621e..f956cf40b146b 100644
--- a/etc/evergreen_yml_components/variants/misc_release.yml
+++ b/etc/evergreen_yml_components/variants/misc_release.yml
@@ -8,7 +8,7 @@ buildvariants:
- amazon2-test
expansions:
test_flags: >-
- --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,requires_external_data_source
+ --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer
push_path: linux
push_bucket: downloads.mongodb.org
push_name: linux
@@ -18,6 +18,7 @@ buildvariants:
MONGO_DISTMOD=amazon2
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
multiversion_platform: amazon2
multiversion_edition: targeted
has_packages: true
@@ -45,7 +46,6 @@ buildvariants:
- name: .jscore .common
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -74,7 +74,7 @@ buildvariants:
archive-mongocryptd-debug
# TODO BUILD-13887 should fix uses_pykmip incompatibility.
test_flags: >-
- --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source
+ --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source,requires_latch_analyzer
push_path: linux
push_bucket: downloads.10gen.com
push_name: linux
@@ -84,6 +84,7 @@ buildvariants:
MONGO_DISTMOD=amazon2
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -109,7 +110,6 @@ buildvariants:
- name: .jscore .common
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: noPassthrough_gen
- name: noPassthroughWithMongod_gen
- name: .replica_sets .common
@@ -142,7 +142,8 @@ buildvariants:
MONGO_DISTMOD=amazon2
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: aarch64
@@ -168,7 +169,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -204,11 +204,12 @@ buildvariants:
MONGO_DISTMOD=amazon2
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
# TODO BUILD-13887 should fix uses_pykmip incompatibility.
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer
has_packages: true
multiversion_platform: amazon2
multiversion_edition: enterprise
@@ -235,8 +236,8 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- amazon2-arm64-large
- name: .config_fuzzer !.large
@@ -301,46 +302,46 @@ buildvariants:
- name: test_packages
distros:
- ubuntu1804-arm64-build
+ - name: vector_search
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: amazon2022
- display_name: Amazon Linux 2022
+- name: amazon2023
+ display_name: Amazon Linux 2023
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
run_on:
- - amazon2022-small
+ - amazon2023.0-small
expansions:
push_path: linux
push_bucket: downloads.mongodb.org
push_name: linux
- push_arch: x86_64-amazon2022
- compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source
+ push_arch: x86_64-amazon2023
+ compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: x86_64
- packager_distro: amazon2022
+ packager_distro: amazon2023
repo_edition: org
scons_cache_scope: shared
- large_distro_name: amazon2022-large
- compile_variant: amazon2022
+ large_distro_name: amazon2023.0-large
+ compile_variant: amazon2023
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: aggregation
- name: .auth !.audit !.multiversion
- name: causally_consistent_jscore_txns_passthrough
- name: .misc_js
- name: .concurrency .common
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: concurrency_replication_causal_consistency_gen
- name: disk_wiredtiger
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -354,37 +355,37 @@ buildvariants:
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: enterprise-amazon2022
- display_name: "Enterprise Amazon Linux 2022"
+- name: enterprise-amazon2023
+ display_name: "Enterprise Amazon Linux 2023"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
run_on:
- - amazon2022-small
+ - amazon2023.0-small
expansions:
additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug
push_path: linux
push_bucket: downloads.10gen.com
push_name: linux
- push_arch: x86_64-enterprise-amazon2022
- compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ push_arch: x86_64-enterprise-amazon2023
+ compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique"
# TODO BUILD-13887 should fix uses_pykmip incompatibility.
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer
has_packages: true
- multiversion_platform: amazon2022
+ multiversion_platform: amazon2023
multiversion_edition: enterprise
multiversion_architecture: x86_64
packager_script: packager_enterprise.py
packager_arch: x86_64
- packager_distro: amazon2022
+ packager_distro: amazon2023
repo_edition: enterprise
scons_cache_scope: shared
- compile_variant: enterprise-amazon2022
+ compile_variant: enterprise-amazon2023
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: test_api_version_compatibility
- name: .aggfuzzer !.feature_flag_guarded !.multiversion
- name: .aggregation !.feature_flag_guarded
@@ -394,10 +395,10 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: config_fuzzer_concurrency
- name: config_fuzzer_jsCore
- name: disk_wiredtiger
@@ -406,7 +407,7 @@ buildvariants:
- name: initial_sync_fuzzer_gen
- name: jsCore
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: .jscore .common !jsCore !.feature_flag_guarded
- name: jsCore_txns_large_txns_format
- name: json_schema
@@ -418,17 +419,17 @@ buildvariants:
- name: .query_fuzzer
- name: .read_write_concern .large
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: .read_write_concern !.large
- name: .replica_sets !.encrypt !.auth
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: replica_sets_api_version_jscore_passthrough_gen
- name: replica_sets_reconfig_jscore_passthrough_gen
- name: retryable_writes_jscore_passthrough_gen
- name: retryable_writes_jscore_stepdown_passthrough_gen
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: .read_only
- name: .rollbackfuzzer
- name: sasl
@@ -444,46 +445,47 @@ buildvariants:
- name: .stitch
- name: .crypt
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: .publish_crypt
- name: secondary_reads_passthrough_gen
- name: server_discovery_and_monitoring_json_test_TG
- name: .serverless
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: server_selection_json_test_TG
distros:
- - amazon2022-large
+ - amazon2023.0-large
- name: test_packages
distros:
- ubuntu2204-large
+ - name: vector_search
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: amazon2022-arm64
- display_name: Amazon Linux 2022 arm64
+- name: amazon2023-arm64
+ display_name: Amazon Linux 2023 arm64
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
run_on:
- - amazon2022-arm64-small
+ - amazon2023.0-arm64-small
expansions:
push_path: linux
push_bucket: downloads.mongodb.org
push_name: linux
- push_arch: aarch64-amazon2022
- compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source
+ push_arch: aarch64-amazon2023
+ compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: aarch64
- packager_distro: amazon2022
+ packager_distro: amazon2023
repo_edition: org
scons_cache_scope: shared
- large_distro_name: amazon2022-arm64-large
- compile_variant: amazon2022-arm64
+ large_distro_name: amazon2023.0-arm64-large
+ compile_variant: amazon2023-arm64
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: aggregation !.feature_flag_guarded
- name: .auth !.audit !.multiversion
- name: sharding_auth_gen
@@ -491,13 +493,12 @@ buildvariants:
- name: .misc_js
- name: .concurrency .common
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: concurrency_replication_causal_consistency_gen
- name: disk_wiredtiger
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -511,37 +512,37 @@ buildvariants:
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: enterprise-amazon2022-arm64
- display_name: "Enterprise Amazon Linux 2022 arm64"
+- name: enterprise-amazon2023-arm64
+ display_name: "Enterprise Amazon Linux 2023 arm64"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
run_on:
- - amazon2022-arm64-small
+ - amazon2023.0-arm64-small
expansions:
additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug
push_path: linux
push_bucket: downloads.10gen.com
push_name: linux
- push_arch: aarch64-enterprise-amazon2022
- compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ push_arch: aarch64-enterprise-amazon2023
+ compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique"
# TODO BUILD-13887 should fix uses_pykmip incompatibility.
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer
has_packages: true
- multiversion_platform: amazon2022
+ multiversion_platform: amazon2023
multiversion_edition: enterprise
multiversion_architecture: aarch64
packager_script: packager_enterprise.py
packager_arch: aarch64
- packager_distro: amazon2022
+ packager_distro: amazon2023
repo_edition: enterprise
scons_cache_scope: shared
- compile_variant: enterprise-amazon2022-arm64
+ compile_variant: enterprise-amazon2023-arm64
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: test_api_version_compatibility
- name: .aggfuzzer !.feature_flag_guarded !.multiversion
- name: .aggregation !.feature_flag_guarded
@@ -551,21 +552,21 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: .config_fuzzer !.large
- name: .config_fuzzer .large
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: disk_wiredtiger
- name: .encrypt
- name: idl_tests
- name: initial_sync_fuzzer_gen
- name: jsCore
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: .jscore .common !jsCore !.feature_flag_guarded
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
@@ -578,17 +579,17 @@ buildvariants:
- name: .query_fuzzer
- name: .read_write_concern .large
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: .read_write_concern !.large
- name: .replica_sets !.encrypt !.auth
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: replica_sets_api_version_jscore_passthrough_gen
- name: replica_sets_reconfig_jscore_passthrough_gen
- name: retryable_writes_jscore_passthrough_gen
- name: retryable_writes_jscore_stepdown_passthrough_gen
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: .read_only
- name: .rollbackfuzzer
- name: sasl
@@ -604,19 +605,20 @@ buildvariants:
- name: .stitch
- name: .crypt
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: .publish_crypt
- name: secondary_reads_passthrough_gen
- name: server_discovery_and_monitoring_json_test_TG
- name: .serverless
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: server_selection_json_test_TG
distros:
- - amazon2022-arm64-large
+ - amazon2023.0-arm64-large
- name: test_packages
distros:
- ubuntu2204-arm64-large
+ - name: vector_search
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -630,12 +632,13 @@ buildvariants:
push_bucket: downloads.mongodb.org
push_name: linux
push_arch: x86_64-debian10
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
compile_flags: >-
--ssl
MONGO_DISTMOD=debian10
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
multiversion_platform: debian10
multiversion_edition: targeted
has_packages: true
@@ -663,7 +666,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common !.decimal !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -700,7 +702,8 @@ buildvariants:
MONGO_DISTMOD=debian10
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
@@ -728,7 +731,6 @@ buildvariants:
- debian10-large
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: sasl
@@ -757,7 +759,8 @@ buildvariants:
MONGO_DISTMOD=debian11
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
multiversion_platform: debian11
multiversion_edition: targeted
has_packages: true
@@ -785,7 +788,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common !.decimal !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -821,7 +823,8 @@ buildvariants:
--ssl MONGO_DISTMOD=debian11
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -845,7 +848,6 @@ buildvariants:
- name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: sasl
@@ -873,7 +875,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel70
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
multiversion_platform: rhel70
multiversion_edition: targeted
has_packages: true
@@ -901,7 +904,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -913,8 +915,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2004-package
- #TODO: BF-24515 renable once fixed
- #- name: selinux_rhel7_org
+ - name: selinux_rhel7_org
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -925,10 +926,12 @@ buildvariants:
run_on:
- rhel76-test
expansions:
+ test_flags: --excludeWithAnyTags=requires_latch_analyzer
compile_flags: >-
--ssl MONGO_DISTMOD=rhel70
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
compile_variant: rhel76_compile_rhel70
tasks:
- name: compile_and_archive_dist_test_then_package_TG
@@ -952,10 +955,11 @@ buildvariants:
--opt=on
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
tooltags: ""
build_mongoreplay: true
test_flags: >-
- --excludeWithAnyTags=requires_os_access
+ --excludeWithAnyTags=requires_os_access,requires_latch_analyzer
compile_variant: ubi8
tasks:
- name: compile_and_archive_dist_test_then_package_TG
@@ -982,7 +986,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel80
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
multiversion_platform: rhel80
multiversion_edition: targeted
has_packages: true
@@ -1010,7 +1015,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -1022,6 +1026,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2004-package
+ - name: selinux_rhel8_org
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1046,7 +1051,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel80
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CFLAGS="-fno-gnu-unique"
@@ -1080,8 +1086,8 @@ buildvariants:
- name: .change_streams
- name: .change_stream_fuzzer
- name: .misc_js
- - name: .concurrency !.large !.ubsan !.no_txns !.debug_only
- - name: .concurrency .large !.ubsan !.no_txns !.debug_only
+ - name: .concurrency !.large !.ubsan !.no_txns
+ - name: .concurrency .large !.ubsan !.no_txns
distros:
- rhel80-medium
- name: disk_wiredtiger
@@ -1148,9 +1154,8 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2004-package
-
- #TODO: BF-24515 renable once fixed
- #- name: selinux_rhel8_enterprise
+ - name: vector_search
+ - name: selinux_rhel8_enterprise
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1168,7 +1173,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel82
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: aarch64
@@ -1194,7 +1200,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -1229,7 +1234,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel82
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
@@ -1256,7 +1262,6 @@ buildvariants:
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: .replica_sets .multi_oplog
@@ -1274,6 +1279,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu1804-arm64-build
+ - name: vector_search
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1287,8 +1293,8 @@ buildvariants:
push_bucket: downloads.mongodb.org
push_name: linux
push_arch: x86_64-rhel90
- compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
multiversion_platform: rhel90
multiversion_edition: targeted
has_packages: true
@@ -1316,7 +1322,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common !.multiversion
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -1327,8 +1332,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2204-large
- #TODO: BF-24515 renable once fixed
- #- name: selinux_rhel9_org
+ - name: selinux_rhel9_org
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1345,8 +1349,8 @@ buildvariants:
push_bucket: downloads.10gen.com
push_name: linux
push_arch: x86_64-enterprise-rhel90
- compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique"
multiversion_platform: rhel90
multiversion_edition: enterprise
@@ -1380,7 +1384,6 @@ buildvariants:
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .ocsp
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -1398,9 +1401,8 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2204-large
-
- #TODO: BF-24515 renable once fixed
- #- name: selinux_rhel9_enterprise
+ - name: vector_search
+ - name: selinux_rhel9_enterprise
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1419,7 +1421,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel90
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: aarch64
@@ -1445,7 +1448,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -1459,7 +1461,7 @@ buildvariants:
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
-- name: enterprise-rhel90-arm64
+- name: enterprise-rhel-90-arm64
display_name: "Enterprise RHEL 9.0 arm64"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
@@ -1480,7 +1482,8 @@ buildvariants:
--ssl MONGO_DISTMOD=rhel90
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
@@ -1492,7 +1495,7 @@ buildvariants:
repo_edition: enterprise
scons_cache_scope: shared
large_distro_name: rhel90-arm64-large
- compile_variant: enterprise-rhel90-arm64
+ compile_variant: enterprise-rhel-90-arm64
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
distros:
@@ -1508,7 +1511,6 @@ buildvariants:
- name: .jscore .common !.decimal !.sharding
- name: jsCore_txns_large_txns_format
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: .replica_sets .multi_oplog
@@ -1527,6 +1529,7 @@ buildvariants:
- name: test_packages
distros:
- ubuntu2204-arm64-small
+ - name: vector_search
- name: .publish
- name: generate_buildid_to_debug_symbols_mapping
@@ -1549,7 +1552,8 @@ buildvariants:
-j$(echo $(grep -c ^processor /proc/cpuinfo) / 2 | bc)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
--linker=gold
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer
multiversion_platform: suse12
multiversion_edition: targeted
has_packages: true
@@ -1580,7 +1584,6 @@ buildvariants:
- name: .jscore .common !.decimal !.feature_flag_guarded
- name: .jstestfuzz .common
- name: multiversion_gen
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -1618,7 +1621,8 @@ buildvariants:
-j$(echo $(grep -c ^processor /proc/cpuinfo) / 2 | bc)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
--linker=gold
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -1641,7 +1645,6 @@ buildvariants:
- name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: sasl
@@ -1677,7 +1680,8 @@ buildvariants:
MONGO_DISTMOD=suse15
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -1698,7 +1702,6 @@ buildvariants:
- name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: sasl
@@ -1727,7 +1730,8 @@ buildvariants:
MONGO_DISTMOD=suse15
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer
multiversion_platform: suse15
multiversion_edition: targeted
has_packages: true
@@ -1754,7 +1758,6 @@ buildvariants:
- name: free_monitoring
- name: .jscore .common !.decimal !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: replica_sets_gen
- name: .replica_sets .common
@@ -1786,9 +1789,10 @@ buildvariants:
MONGO_DISTMOD=ubuntu1804
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
multiversion_platform: ubuntu1804
multiversion_edition: targeted
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager.py
packager_arch: x86_64
@@ -1816,7 +1820,6 @@ buildvariants:
- name: .jscore .common
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: multiversion_gen
- name: .powercycle
- name: replica_sets_gen
@@ -1857,13 +1860,14 @@ buildvariants:
MONGO_DISTMOD=ubuntu1804
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
multiversion_platform: ubuntu1804
multiversion_edition: enterprise
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer
has_packages: true
packager_script: packager_enterprise.py
packager_arch: x86_64
@@ -1896,7 +1900,6 @@ buildvariants:
- name: jsCore_auth
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .multiversion_fuzzer
- name: .multiversion_passthrough
- name: .ocsp
@@ -1933,11 +1936,12 @@ buildvariants:
MONGO_DISTMOD=ubuntu1804
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic
-Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer
resmoke_jobs_max: 4 # Avoid starting too many mongod's on ARM test servers
has_packages: true
packager_script: packager_enterprise.py
@@ -1962,7 +1966,6 @@ buildvariants:
- name: fle
- name: .jscore .common !.auth !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -1994,7 +1997,8 @@ buildvariants:
MONGO_DISTMOD=ubuntu1804
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer
resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers
has_packages: true
packager_script: packager.py
@@ -2032,7 +2036,8 @@ buildvariants:
MONGO_DISTMOD=ubuntu2204
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer
multiversion_platform: ubuntu2204
multiversion_edition: targeted
has_packages: true
@@ -2059,7 +2064,6 @@ buildvariants:
- name: .jscore .common
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
# - name: multiversion_gen
- name: replica_sets_gen
- name: replica_sets_jscore_passthrough
@@ -2089,7 +2093,8 @@ buildvariants:
MONGO_DISTMOD=ubuntu2004
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
multiversion_platform: ubuntu2004
multiversion_edition: targeted
has_packages: true
@@ -2116,7 +2121,6 @@ buildvariants:
- name: .jscore .common
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
# - name: multiversion_gen
- name: replica_sets_gen
- name: replica_sets_jscore_passthrough
@@ -2153,7 +2157,8 @@ buildvariants:
--ssl MONGO_DISTMOD=ubuntu2004
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -2185,7 +2190,6 @@ buildvariants:
- name: jsCore_auth
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .ocsp
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -2220,7 +2224,8 @@ buildvariants:
MONGO_DISTMOD=ubuntu2204
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -2252,7 +2257,6 @@ buildvariants:
- name: jsCore_auth
- name: .jstestfuzz .common
- name: libunwind_tests
- - name: .logical_session_cache .one_sec
- name: .ocsp
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -2286,7 +2290,8 @@ buildvariants:
--ssl MONGO_DISTMOD=ubuntu2004
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: >-
SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique"
CCFLAGS="-fno-gnu-unique"
@@ -2314,7 +2319,6 @@ buildvariants:
- name: fle
- name: .jscore .common !.auth !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -2345,7 +2349,8 @@ buildvariants:
MONGO_DISTMOD=ubuntu2004
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers
has_packages: true
packager_script: packager.py
@@ -2382,8 +2387,8 @@ buildvariants:
push_bucket: downloads.10gen.com
push_name: linux
push_arch: aarch64-enterprise-ubuntu2204
- compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique"
resmoke_jobs_max: 4 # Avoid starting too many mongod's on ARM test servers
has_packages: true
@@ -2409,7 +2414,6 @@ buildvariants:
- name: fle
- name: .jscore .common !.auth !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: replica_sets_gen
- name: .replica_sets .common
- name: .sharding .txns
@@ -2438,8 +2442,8 @@ buildvariants:
push_bucket: downloads.mongodb.org
push_name: linux
push_arch: aarch64-ubuntu2204
- compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
- test_flags: --excludeWithAnyTags=requires_external_data_source
+ compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer
resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers
has_packages: true
packager_script: packager.py
@@ -2474,9 +2478,7 @@ buildvariants:
push_bucket: downloads.mongodb.org
push_name: windows
push_arch: x86_64
- multiversion_platform: windows_x86_64-2008plus-ssl
- multiversion_platform_42_or_later: windows_x86_64-2012plus
- multiversion_platform_44_or_later: windows
+ multiversion_platform: windows
multiversion_edition: base
content_type: application/zip
compile_flags: >-
@@ -2484,12 +2486,13 @@ buildvariants:
MONGO_DISTMOD=windows
-j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5")
--win-version-min=win10
+ --use-diagnostic-latches=off
num_scons_link_jobs_available: 0.25
python: '/cygdrive/c/python/python37/python.exe'
ext: zip
scons_cache_scope: shared
large_distro_name: windows-vsCurrent-large
- test_flags: &windows_common_test_excludes --excludeWithAnyTags=incompatible_with_windows_tls,requires_external_data_source
+ test_flags: &windows_common_test_excludes --excludeWithAnyTags=incompatible_with_windows_tls,requires_external_data_source,requires_latch_analyzer
compile_variant: windows
tasks:
- name: compile_test_and_package_serial_no_unittests_TG
@@ -2502,7 +2505,7 @@ buildvariants:
- name: .misc_js
# Some concurrency workloads require a lot of memory, so we use machines
# with more RAM for these suites.
- - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common
distros:
- windows-vsCurrent-large
- name: .concurrency .common
@@ -2549,6 +2552,7 @@ buildvariants:
LIBPATH="c:/sasl/lib"
-j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5")
--win-version-min=win10
+ --use-diagnostic-latches=off
num_scons_link_jobs_available: 0.25
python: '/cygdrive/c/python/python37/python.exe'
ext: zip
@@ -2577,7 +2581,7 @@ buildvariants:
- name: .misc_js
# Some concurrency workloads require a lot of memory, so we use machines
# with more RAM for these suites.
- - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common
distros:
- windows-vsCurrent-large
- name: .concurrency .common
@@ -2612,7 +2616,7 @@ buildvariants:
run_on:
- macos-1100
expansions:
- test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source,requires_latch_analyzer
push_path: osx
push_bucket: downloads.mongodb.org
push_name: macos
@@ -2623,6 +2627,7 @@ buildvariants:
-j$(sysctl -n hw.logicalcpu)
--libc++
--variables-files=etc/scons/xcode_macosx.vars
+ --use-diagnostic-latches=off
resmoke_jobs_max: 6
compile_variant: macos
tasks:
@@ -2633,7 +2638,8 @@ buildvariants:
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .misc_js
- - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate
+ # TODO(SERVER-78135): remove !.cursor_sweeps.
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.cursor_sweeps
- name: disk_wiredtiger
- name: free_monitoring
- name: initial_sync_fuzzer_gen
@@ -2643,7 +2649,6 @@ buildvariants:
- name: .jstestfuzz .interrupt
- name: .jstestfuzz .common
- name: .jstestfuzz .session
- - name: .logical_session_cache .one_sec
- name: .query_fuzzer
- name: .read_write_concern !.linearize
- name: replica_sets_gen
@@ -2657,9 +2662,7 @@ buildvariants:
- name: .sharding .txns
- name: .ssl
- name: .stitch
- - name: unittest_shell_hang_analyzer_gen
- name: push
- - name: generate_buildid_to_debug_symbols_mapping
- name: macos-arm64
display_name: macOS arm64
@@ -2667,7 +2670,7 @@ buildvariants:
run_on:
- macos-1100-arm64
expansions:
- test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source,requires_latch_analyzer
push_path: osx
push_bucket: downloads.mongodb.org
push_name: macos
@@ -2678,6 +2681,7 @@ buildvariants:
-j$(sysctl -n hw.logicalcpu)
--libc++
--variables-files=etc/scons/xcode_macosx_arm.vars
+ --use-diagnostic-latches=off
resmoke_jobs_max: 6
compile_variant: macos-arm64
tasks:
@@ -2688,7 +2692,8 @@ buildvariants:
- name: .causally_consistent !.sharding
- name: .change_streams
- name: .misc_js
- - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate
+ # TODO(SERVER-78135): remove !.cursor_sweeps.
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.cursor_sweeps
- name: disk_wiredtiger
- name: free_monitoring
- name: initial_sync_fuzzer_gen
@@ -2698,7 +2703,6 @@ buildvariants:
- name: .jstestfuzz .interrupt
- name: .jstestfuzz .common
- name: .jstestfuzz .session
- - name: .logical_session_cache .one_sec
- name: .query_fuzzer
- name: .read_write_concern !.linearize
- name: replica_sets_gen
@@ -2712,9 +2716,7 @@ buildvariants:
- name: .sharding .txns
- name: .ssl
- name: .stitch
- - name: unittest_shell_hang_analyzer_gen
- name: push
- - name: generate_buildid_to_debug_symbols_mapping
- name: enterprise-macos
display_name: Enterprise macOS
@@ -2724,7 +2726,7 @@ buildvariants:
run_on:
- macos-1100
expansions:
- test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source,requires_latch_analyzer
additional_package_targets: >-
archive-mongocryptd
archive-mongocryptd-debug
@@ -2740,6 +2742,7 @@ buildvariants:
-j$(sysctl -n hw.logicalcpu)
--libc++
--variables-files=etc/scons/xcode_macosx.vars
+ --use-diagnostic-latches=off
resmoke_jobs_max: 6
compile_variant: enterprise-macos
tasks:
@@ -2748,11 +2751,9 @@ buildvariants:
- name: audit
- name: auth_audit_gen
- name: causally_consistent_jscore_txns_passthrough
- # TODO: SERVER-66945 Re-enable ESE on enterprise macos
- # - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore
+ - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: mqlrun
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -2760,7 +2761,6 @@ buildvariants:
- name: push
- name: .crypt
- name: .publish_crypt
- - name: generate_buildid_to_debug_symbols_mapping
- name: enterprise-macos-arm64
display_name: Enterprise macOS arm64
@@ -2770,7 +2770,7 @@ buildvariants:
run_on:
- macos-1100-arm64
expansions:
- test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source
+ test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source,requires_latch_analyzer
additional_package_targets: >-
archive-mongocryptd
archive-mongocryptd-debug
@@ -2786,6 +2786,7 @@ buildvariants:
-j$(sysctl -n hw.logicalcpu)
--libc++
--variables-files=etc/scons/xcode_macosx_arm.vars
+ --use-diagnostic-latches=off
resmoke_jobs_max: 6
compile_variant: enterprise-macos-arm64
tasks:
@@ -2797,7 +2798,6 @@ buildvariants:
- name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore
- name: .jscore .common !.decimal !.sharding !.feature_flag_guarded
- name: .jstestfuzz .common
- - name: .logical_session_cache .one_sec
- name: mqlrun
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
@@ -2805,7 +2805,6 @@ buildvariants:
- name: push
- name: .crypt
- name: .publish_crypt
- - name: generate_buildid_to_debug_symbols_mapping
- name: enterprise-rhel-82-arm64-grpc
display_name: "Enterprise RHEL 8.2 arm64 GRPC"
@@ -2816,12 +2815,14 @@ buildvariants:
- rhel82-arm64-large
stepback: false
expansions:
+ test_flags: --excludeWithAnyTags=requires_latch_analyzer
compile_flags: >-
--ssl
--dbg=on
MONGO_DISTMOD=rhel80
-j$(grep -c ^processor /proc/cpuinfo)
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
--link-model=dynamic
ENABLE_GRPC_BUILD=1
--use-libunwind=off
@@ -2832,3 +2833,39 @@ buildvariants:
- name: compile_test_and_package_parallel_unittest_stream_TG
- name: compile_test_and_package_parallel_core_stream_TG
- name: compile_test_and_package_parallel_dbtest_stream_TG
+
+- name: enterprise-amazon2-streams
+ display_name: "Amazon Linux 2 enterprise build with streams"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+ modules:
+ - enterprise
+ run_on:
+ - amazon2-test
+ expansions:
+ test_flags: >-
+ --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source
+ push_path: linux
+ push_bucket: downloads.10gen.com
+ push_name: linux
+ push_arch: x86_64-enterprise-amazon2-streams
+ compile_flags: >-
+ --ssl
+ MONGO_DISTMOD=amazon2
+ -j$(grep -c ^processor /proc/cpuinfo)
+ --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --streams-release-build
+ multiversion_platform: amazon2
+ multiversion_edition: enterprise-streams
+ has_packages: true
+ packager_script: packager_enterprise.py
+ packager_arch: x86_64
+ packager_distro: amazon2
+ repo_edition: enterprise
+ scons_cache_scope: shared
+ compile_variant: enterprise-amazon2-streams
+ tasks:
+ - name: compile_test_and_package_serial_no_unittests_TG
+ distros:
+ - amazon2-build
+ - name: .publish
+ - name: generate_buildid_to_debug_symbols_mapping
diff --git a/etc/evergreen_yml_components/variants/sanitizer.yml b/etc/evergreen_yml_components/variants/sanitizer.yml
index 8358e7c175b6a..29c5eea251e35 100644
--- a/etc/evergreen_yml_components/variants/sanitizer.yml
+++ b/etc/evergreen_yml_components/variants/sanitizer.yml
@@ -1,6 +1,16 @@
# Build variant definitions for vanilla sanitizers that can be used across
# release and dev environments.
+variables:
+# If you add anything to san_options, make sure the appropriate changes are
# also made to SConstruct, and to the san_options in evergreen.yml and
# compile_static_analysis.yml.
+- aubsan_options: &aubsan_options
+ >-
+ UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+ LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1"
+ ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+
buildvariants:
- name: rhel80-asan
@@ -11,9 +21,7 @@ buildvariants:
stepback: true
expansions:
lang_environment: LANG=C
- san_options: >-
- LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
- ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+ san_options: *aubsan_options
compile_flags: >-
--variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
--opt=on
@@ -50,11 +58,7 @@ buildvariants:
archive-mongocryptd
archive-mongocryptd-debug
lang_environment: LANG=C
- # If you add anything to san_options, make sure the appropriate changes are
- # also made to SConstruct.
- san_options: >-
- LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
- ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+ san_options: *aubsan_options
compile_flags: >-
--variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
--dbg=on
@@ -65,7 +69,7 @@ buildvariants:
--ocsp-stapling=off
--enable-free-mon=on
-j$(grep -c ^processor /proc/cpuinfo)
- test_flags: --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling
+ test_flags: --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling,requires_increased_memlock_limits
multiversion_platform: rhel80
multiversion_edition: enterprise
resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build.
@@ -98,7 +102,6 @@ buildvariants:
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: json_schema
- - name: .logical_session_cache
- name: .multi_shard .common
- name: multiversion_gen
- name: .multiversion_fuzzer
@@ -141,9 +144,7 @@ buildvariants:
archive-mongocryptd
archive-mongocryptd-debug
lang_environment: LANG=C
- # If you add anything to san_options, make sure the appropriate changes are
- # also made to SConstruct.
- san_options: UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer"
+ san_options: *aubsan_options
compile_flags: >-
--variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
--dbg=on
@@ -153,7 +154,7 @@ buildvariants:
--ocsp-stapling=off
--enable-free-mon=on
-j$(grep -c ^processor /proc/cpuinfo)
- test_flags: --excludeWithAnyTags=requires_ocsp_stapling
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
multiversion_platform: rhel80
multiversion_edition: enterprise
resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build.
@@ -166,6 +167,7 @@ buildvariants:
- name: compile_integration_and_test_parallel_stream_TG
distros:
- rhel80-large
+ - name: run_pretty_printer_tests
- name: .aggregation !.feature_flag_guarded
- name: .auth
- name: audit
@@ -182,7 +184,6 @@ buildvariants:
- name: jsCore_min_batch_repeat_queries_ese_gsm
- name: jsCore_txns_large_txns_format
- name: json_schema
- - name: .logical_session_cache .one_sec
- name: .multi_shard .common
- name: multiversion_gen
- name: .multiversion_fuzzer
@@ -208,3 +209,46 @@ buildvariants:
- name: server_discovery_and_monitoring_json_test_TG
- name: server_selection_json_test_TG
- name: generate_buildid_to_debug_symbols_mapping
+
+- &rhel80-debug-aubsan-lite_fuzzer-template
+ name: &rhel80-debug-aubsan-lite_fuzzer rhel80-debug-aubsan-lite_fuzzer
+ display_name: "{A,UB}SAN Enterprise RHEL 8.0 FUZZER"
+ cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
+ modules:
+ - enterprise
+ run_on:
+ - rhel80-build
+ stepback: false
+ expansions:
+ additional_package_targets: >-
+ archive-mongocryptd
+ archive-mongocryptd-debug
+ lang_environment: LANG=C
+ toolchain_version: stable
+ # If you add anything to san_options, make sure the appropriate changes are
+ # also made to SConstruct.
+ san_options: *aubsan_options
+ compile_flags: >-
+ LINKFLAGS=-nostdlib++
+ LIBS=stdc++
+ --variables-files=etc/scons/mongodbtoolchain_${toolchain_version}_clang.vars
+ --dbg=on
+ --opt=on
+ --allocator=system
+ --sanitize=undefined,address,fuzzer
+ --ssl
+ --ocsp-stapling=off
+ -j$(grep -c ^processor /proc/cpuinfo)
+ test_flags: --excludeWithAnyTags=requires_ocsp_stapling
+ resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build.
+ hang_analyzer_dump_core: false
+ scons_cache_scope: shared
+ separate_debug: off
+ compile_variant: *rhel80-debug-aubsan-lite_fuzzer
+ display_tasks:
+ - name: libfuzzertests!
+ execution_tasks:
+ - compile_and_archive_libfuzzertests
+ - fetch_and_run_libfuzzertests
+ tasks:
+ - name: compile_archive_and_run_libfuzzertests_TG
diff --git a/etc/evergreen_yml_components/variants/task_generation.yml b/etc/evergreen_yml_components/variants/task_generation.yml
index 169561518b387..ae952462c2a3f 100644
--- a/etc/evergreen_yml_components/variants/task_generation.yml
+++ b/etc/evergreen_yml_components/variants/task_generation.yml
@@ -11,5 +11,9 @@ buildvariants:
- rhel80-medium
tasks:
- name: version_gen
+ distros:
+ - ubuntu2004-medium
- name: version_burn_in_gen
+ distros:
+ - ubuntu2004-medium
- name: version_expansions_gen
diff --git a/etc/generate_subtasks_config.yml b/etc/generate_subtasks_config.yml
index bf2471d40125d..b212c99a5730d 100644
--- a/etc/generate_subtasks_config.yml
+++ b/etc/generate_subtasks_config.yml
@@ -1,20 +1,19 @@
build_variant_large_distro_exceptions:
- amazon
- amazon2
- - amazon2022
- - amazon2022-arm64
+ - amazon2023
+ - amazon2023-arm64
- debian10
- debian11
- enterprise-amazon2
- enterprise-amazon2-arm64
- - enterprise-amazon2022
- - enterprise-amazon2022-arm64
+ - enterprise-amazon2023
+ - enterprise-amazon2023-arm64
- enterprise-debian10-64
- enterprise-debian11-64
- enterprise-linux-64-amazon-ami
- enterprise-macos
- enterprise-macos-rosetta-2
- - enterprise-macos-cxx20
- enterprise-macos-arm64
- enterprise-rhel-67-s390x
- enterprise-rhel-70-64-bit
diff --git a/etc/iwyu_mapping.imp b/etc/iwyu_mapping.imp
new file mode 100644
index 0000000000000..75dcd55d6196d
--- /dev/null
+++ b/etc/iwyu_mapping.imp
@@ -0,0 +1,21 @@
+[
+ {include: ["\"mongo/platform/compiler_gcc.h\"", "private", "\"mongo/platform/compiler.h\"", "public"]},
+ {include: ["\"float.h\"", "private", "", "public"]},
+ {include: ["\"limits.h\"", "private", "", "public"]},
+ {include: ["\"stdarg.h\"", "private", "", "public"]},
+
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/smart_ptr/detail/operator_bool.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/optional/detail/optional_relops.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/optional/detail/optional_reference_spec.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/tuple/detail/tuple_basic.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/program_options/detail/value_semantic.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/optional/detail/optional_swap.hpp\"", "private", "", "public"]},
+ {include: ["", "private", "", "public"]},
+ {include: ["\"boost/preprocessor/iteration/detail/iter/limits/forward1_256.hpp\"", "private", "", "public"]},
+]
diff --git a/etc/lsan.suppressions b/etc/lsan.suppressions
index 80d58180cae1e..2c88c8cce9c43 100644
--- a/etc/lsan.suppressions
+++ b/etc/lsan.suppressions
@@ -9,4 +9,9 @@ leak:mongo::Interruptible::installWaitListener
# The singleton must live throughout the lifetime of all SSL threads
leak::mongo::SSLThreadInfo::ThreadIDManager::idManager
+# Thread names leak from threads that are never terminated.
+leak:mongo::setThreadName
+leak:mongo::getThreadName
+leak:__cxa_thread_atexit_impl
+
leak:glob64
diff --git a/etc/pip/components/build_metrics.req b/etc/pip/components/build_metrics.req
index 05066289e1858..288240c805b4b 100644
--- a/etc/pip/components/build_metrics.req
+++ b/etc/pip/components/build_metrics.req
@@ -2,4 +2,4 @@ psutil
jsonschema
memory_profiler
puremagic
-tabulate
\ No newline at end of file
+tabulate
diff --git a/etc/pip/components/evergreen.req b/etc/pip/components/evergreen.req
index 986f727df9d02..dbf22a50bd10b 100644
--- a/etc/pip/components/evergreen.req
+++ b/etc/pip/components/evergreen.req
@@ -3,4 +3,4 @@ dataclasses; python_version < "3.7"
inject ~= 4.3.1
GitPython ~= 3.1.7
pydantic ~= 1.8.2
-structlog ~= 19.2.0
+structlog ~= 23.1.0
diff --git a/etc/pip/components/idl.req b/etc/pip/components/idl.req
index 0b0ef8924b208..13ad323f0cdad 100644
--- a/etc/pip/components/idl.req
+++ b/etc/pip/components/idl.req
@@ -1,3 +1,2 @@
unittest-xml-reporting >= 2.2.0, <= 3.0.4
-typing <= 3.7.4.3
packaging <= 21.3
diff --git a/etc/pip/components/lint.req b/etc/pip/components/lint.req
index 3437ce33961e8..d216c950f158d 100644
--- a/etc/pip/components/lint.req
+++ b/etc/pip/components/lint.req
@@ -1,13 +1,14 @@
# Linters
# Note: These versions are checked by python modules in buildscripts/linter/
-GitPython ~= 3.1.7
-mypy ~= 0.942
-pydocstyle == 6.1.1
-pylint == 2.7.2
-structlog ~= 19.2.0
-typing <= 3.7.4.3
-yamllint == 1.15.0
-yapf == 0.26.0
-evergreen-lint == 0.1.3
-types-setuptools == 57.4.12
-types-requests == 2.26.3
+GitPython ~= 3.1.31
+mypy ~= 1.3.0
+pydocstyle == 6.3.0
+pylint == 2.7.2 # latest is 2.17.4, but that causes pip install requirements to fail
+structlog ~= 23.1.0
+yamllint == 1.32.0
+yapf == 0.26.0 # latest is 0.40.1, but that causes CI failures
+evergreen-lint == 0.1.4
+types-setuptools == 57.4.12 # latest is 68.0.0.0, but that causes pip install requirements to fail
+types-requests == 2.31.0.1
+tqdm
+colorama
diff --git a/etc/pip/components/platform.req b/etc/pip/components/platform.req
index 2d2f7893ad3dc..e14765ddf0afd 100644
--- a/etc/pip/components/platform.req
+++ b/etc/pip/components/platform.req
@@ -5,5 +5,5 @@ pywin32>=225; sys_platform == "win32" and python_version > "3"
cryptography == 2.3; platform_machine == "s390x" or platform_machine == "ppc64le" # Needed for oauthlib to use RSAAlgorithm # Version locked - see SERVER-36618
cryptography == 36.0.2; platform_machine != "s390x" and platform_machine != "ppc64le"
-mongo-ninja-python == 1.11.1.4; platform_machine == "x86_64" and sys_platform == "linux"
-ninja >= 1.10.0; platform_machine != "x86_64" or sys_platform != "linux"
+mongo-ninja-python == 1.11.1.5; (platform_machine == "x86_64" or platform_machine == "aarch64") and sys_platform == "linux"
+ninja >= 1.10.0; (platform_machine != "x86_64" and platform_machine != "aarch64") or sys_platform != "linux"
diff --git a/etc/pip/components/testing.req b/etc/pip/components/testing.req
index 616235a4235ac..2841fd4b30f9e 100644
--- a/etc/pip/components/testing.req
+++ b/etc/pip/components/testing.req
@@ -25,3 +25,4 @@ mongomock == 4.1.2
pyjwt
selenium
geckodriver-autoinstaller
+pipx==1.2.0
diff --git a/etc/pip/components/tooling_metrics.req b/etc/pip/components/tooling_metrics.req
index 0583e72f5c7cb..09c4ba3f77e5d 100644
--- a/etc/pip/components/tooling_metrics.req
+++ b/etc/pip/components/tooling_metrics.req
@@ -1 +1 @@
-mongo-tooling-metrics == 1.0.7
+mongo-tooling-metrics == 1.0.8
diff --git a/etc/repo_config.yaml b/etc/repo_config.yaml
index 53f6340b618a7..3579077c2eaed 100644
--- a/etc/repo_config.yaml
+++ b/etc/repo_config.yaml
@@ -128,7 +128,7 @@ repos:
repos:
- yum/amazon/2/mongodb-org
- - name: amazon2022
+ - name: amazon2023
type: rpm
edition: org
bucket: repo.mongodb.org
@@ -376,7 +376,7 @@ repos:
repos:
- yum/amazon/2/mongodb-enterprise
- - name: amazon2022
+ - name: amazon2023
type: rpm
edition: enterprise
bucket: repo.mongodb.com
diff --git a/etc/system_perf.yml b/etc/system_perf.yml
index 8ca61ff128999..c4e29cfe972ed 100755
--- a/etc/system_perf.yml
+++ b/etc/system_perf.yml
@@ -45,10 +45,10 @@ variables:
# _skip_compile_rhel70: &_compile_rhel70
# - name: schedule_global_auto_tasks
# variant: task_generation
-# _skip_compile_amazon_linux2_arm64: &_compile_amazon_linux2_arm64
+# _skip_compile_amazon_linux2_arm64: &_compile_amazon_linux2_arm64
# - name: schedule_global_auto_tasks
# variant: task_generation
-# _skip_compile_amazon_linux2_arm64_with_mongocrypt_shlib: &_compile_amazon_linux2_arm64_with_mongocrypt_shlib
+# _skip_compile_amazon_linux2_arm64_with_mongocrypt_shlib: &_compile_amazon_linux2_arm64_with_mongocrypt_shlib
# - name: schedule_global_auto_tasks
# variant: task_generation
# _skip_expansions: &_expansion_updates
@@ -132,10 +132,11 @@ modules:
repo: git@github.com:10gen/mongo-enterprise-modules.git
prefix: src/mongo/db/modules
branch: master
+# Pinned to version 100.7.2
- name: mongo-tools
repo: git@github.com:mongodb/mongo-tools.git
prefix: mongo-tools/src/github.com/mongodb
- branch: master
+ branch: db8c5c4
- name: PrivateWorkloads
repo: git@github.com:10gen/PrivateWorkloads.git
prefix: ${workdir}/src
@@ -344,7 +345,7 @@ functions:
set -o igncr
fi;
- # set_goenv provides set_goenv(), print_ldflags() and print_tags() used below
+ # set_goenv provides set_goenv()
. ./set_goenv.sh
GOROOT="" set_goenv || exit
go version
@@ -354,7 +355,7 @@ functions:
build_tools="$build_tools mongoreplay"
fi
for i in $build_tools; do
- go build -ldflags "$(print_ldflags)" ${args} -tags "$(print_tags ${tooltags})" -o "../../../../../mongodb/bin/$i${exe|}" $i/main/$i.go
+ go build -o "../../../../../mongodb/bin/$i${exe|}" $i/main/$i.go
"../../../../../mongodb/bin/$i${exe|}" --version
done
- command: shell.exec
@@ -391,6 +392,9 @@ functions:
EOF
fi
tar czf mongodb${compile_variant|}.tar.gz mongodb
+ # Put all matching mongo .debug files from the build directory in an archive in the same location
+ # as the library archive (i.e. mongodb/bin).
+ tar czvf mongodb${compile_variant|}-debugsymbols.tar.gz $(find ./build/cached -name mongo\*.debug -type f) --xform 's:^.*/:mongodb/bin/:'
- command: s3.put
params:
aws_key: ${aws_key}
@@ -401,6 +405,16 @@ functions:
permissions: public-read
content_type: ${content_type|application/x-gzip}
display_name: mongodb${compile_variant|}.tar.gz
+ - command: s3.put
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/mongodb${compile_variant|}-debugsymbols.tar.gz
+ remote_file: ${project_dir}/${version_id}/${revision}/${platform}/mongodb${compile_variant|}-${version_id}-debugsymbols.tar.gz
+ bucket: mciuploads
+ permissions: public-read
+ content_type: ${content_type|application/x-gzip}
+ display_name: mongo-debugsymbols.tgz
###
###
@@ -414,6 +428,9 @@ functions:
set -o verbose
source "${workdir}/compile_venv/bin/activate"
python ./buildscripts/scons.py ${compile_flags|} ${scons_cache_args|} $extra_args SPLIT_DWARF=0 archive-mongo-crypt-dev MONGO_VERSION=${version} DESTDIR=$(pwd)/crypt-lib-${version} PKGDIR=$(pwd) ${patch_compile_flags|}
+ # Put all matching mongo .debug files from the build directory in an archive in the same location
+ # as the library archive (i.e. lib).
+ tar czvf mongo-crypt-dev-debugsymbols.tar.gz $(find ./build/cached -name mongo\*.debug -type f) --xform 's:^.*/:lib/:'
- command: s3.put
params:
aws_key: ${aws_key}
@@ -424,6 +441,16 @@ functions:
permissions: public-read
content_type: ${content_type|application/x-gzip}
display_name: mongo_crypt_shared_v1-${version|}-${compile_variant|}.tgz
+ - command: s3.put
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/mongo-crypt-dev-debugsymbols.tar.gz
+ remote_file: ${project_dir}/${version_id}/${revision}/${platform}/mongo_crypt_shared_v1-${compile_variant|}-${version_id}-debugsymbols.tgz
+ bucket: mciuploads
+ permissions: public-read
+ content_type: ${content_type|application/x-gzip}
+ display_name: mongo_crypt_shared_v1-debugsymbols.tgz
###
## Schedule Tasks ##
@@ -1120,7 +1147,7 @@ tasks:
test_control: "initialsync-logkeeper"
mongodb_setup: "initialsync-logkeeper-short"
# Logkeeper dataset with FCV set to 6.0
- mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz"
+ mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz"
- name: initialsync-logkeeper-short-fcbis
priority: 5
@@ -1130,7 +1157,7 @@ tasks:
test_control: "initialsync-logkeeper"
mongodb_setup: "initialsync-logkeeper-short-fcbis"
# Logkeeper dataset with FCV set to 6.0
- mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz"
+ mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz"
- name: initialsync-logkeeper
priority: 5
@@ -1162,7 +1189,7 @@ tasks:
test_control: "initialsync-logkeeper-short-s3-update"
mongodb_setup: "initialsync-logkeeper-short-s3-update"
# Update this to Logkeeper dataset with FCV set to latest after each LTS release.
- mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz"
+ mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz"
- name: initialsync-logkeeper-snapshot-update
priority: 5
@@ -1187,13 +1214,6 @@ tasks:
test_control: "initialsync-large"
mongodb_setup: "replica-2node-fcbis"
- - name: change_streams_throughput
- priority: 5
- commands:
- - func: f_run_dsi_workload
- vars:
- test_control: "change_streams_throughput"
-
- name: change_streams_latency
priority: 5
commands:
@@ -1264,6 +1284,33 @@ tasks:
threads: "1 4",
read_cmd: 'true',
share_dataset: 'true'}
+ - name: fast_running_queries
+ commands:
+ - func: f_run_dsi_workload
+ vars:
+ test_control: mongo-perf.2023-02
+ # Throughput peaks around 4 threads, but we go much higher to purposefully stress the
+ # system. Throughput holds up at much higher concurrency levels, and we want to verify
+ # that this still holds on experimental variants like those testing query shape stats.
+ test_control_params: |
+ {include_filter_1: fast_running_query,
+ include_filter_2: core regression,
+ exclude_filter: single_threaded,
+ threads: "128",
+ read_cmd: 'true'}
+ - name: fast_running_queries_large_dataset
+ commands:
+ - func: f_run_dsi_workload
+ vars:
+ test_control: mongo-perf.2023-02
+ test_control_params: |
+ {include_filter_1: fast_running_query,
+ include_filter_2: query_large_dataset,
+ exclude_filter: none,
+ threads: "128",
+ read_cmd: 'true',
+ share_dataset: 'true'}
- name: big_collection
commands:
- func: f_run_dsi_workload
@@ -1554,6 +1601,7 @@ buildvariants:
-j$(grep -c ^processor /proc/cpuinfo)
--release
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
install-mongocryptd
run_on:
- "amazon2-xlarge"
@@ -1586,6 +1634,7 @@ buildvariants:
-j$(grep -c ^processor /proc/cpuinfo)
--release
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
--allocator=system
--enable-free-mon=off
--enterprise-features=fle,search
@@ -1602,8 +1651,7 @@ buildvariants:
- name: linux-intel-standalone-classic-query-engine
display_name: Linux Intel Standalone (Classic Query Engine)
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1641,8 +1689,7 @@ buildvariants:
- name: linux-intel-standalone-sbe
display_name: Linux Intel Standalone (SBE)
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1661,8 +1708,7 @@ buildvariants:
- name: linux-intel-1-node-replSet-classic-query-engine
display_name: Linux Intel 1-Node ReplSet (Classic Query Engine)
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: &linux-1-node-repl-cron "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1686,8 +1732,7 @@ buildvariants:
- name: linux-intel-1-node-replSet-sbe
display_name: Linux Intel 1-Node ReplSet (SBE)
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1747,6 +1792,7 @@ buildvariants:
-j$(grep -c ^processor /proc/cpuinfo)
--release
--variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars
+ --use-diagnostic-latches=off
compile_variant: -rhel70
run_on:
- rhel70-large
@@ -1755,11 +1801,13 @@ buildvariants:
- name: atlas-like-replica.2022-10
display_name: M60-like-replica.2022-10 3-Node ReplSet
- cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday
+ # TODO SERVER-74399 Reduce frequency back to baseline.
+ # cron: &atlas-like-repl-cron "0 0 * * 0,4" # 00:00 on Sunday,Thursday
+ cron: &atlas-like-repl-cron "0 0 * * *" # Every day at 00:00
modules: *modules
expansions:
mongodb_setup: atlas-like-replica.2022-10
- infrastructure_provisioning: M60-like-replica.2022-10
+ infrastructure_provisioning: M60-like-replica.2023-04
infrastructure_provisioning_release: 2022-11
workload_setup: 2022-11
platform: linux
@@ -1780,6 +1828,67 @@ buildvariants:
- name: linkbench
- name: linkbench2
+ - name: atlas-like-replica-query-stats.2022-10
+ display_name: M60-like-replica.2022-10 3-Node ReplSet (Query Stats)
+ cron: *atlas-like-repl-cron
+ modules: *modules
+ expansions:
+ mongodb_setup: atlas-like-replica-query-stats.2022-10
+ infrastructure_provisioning: M60-like-replica.2023-04
+ infrastructure_provisioning_release: 2022-11
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: -arm64
+ run_on:
+ - "rhel70-perf-M60-like"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks: # Cannot use *3nodetasks because secondary_performance uses a special mongodb setup.
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
+ - name: ycsb_60GB
+ - name: tpcc
+ - name: tpcc_majority
+ - name: linkbench
+ - name: linkbench2
+
+ - name: atlas-M30-real
+ display_name: M30-Atlas ReplSet AWS
+ cron: "0 0 * * 0,4" # 00:00 on Sunday, Thursday
+ modules: *modules
+ expansions:
+ mongodb_setup: atlas
+ canaries: none
+ atlas_setup: M30-repl
+ use_custom_build: true
+ infrastructure_provisioning: workload_client_arm.2023-04
+ infrastructure_provisioning_release: 2022-11
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-atlas-large"
+ depends_on:
+ - name: compile
+ variant: compile-amazon2
+ - name: schedule_global_auto_tasks
+ variant: task_generation
+ - name: compile
+ variant: compile-amazon-linux2-arm64
+ - name: schedule_global_auto_tasks
+ variant: task_generation
+ tasks: # Cannot use *3nodetasks because secondary_performance uses a special mongodb setup
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
+ - name: tpcc
+ - name: linkbench2
+
- name: atlas-M60-real
display_name: M60-Atlas ReplSet AWS
cron: "0 0 * * 0,4" # 00:00 on Sunday, Thursday
@@ -1789,14 +1898,13 @@ buildvariants:
canaries: none
atlas_setup: M60-repl
use_custom_build: true
- infrastructure_provisioning: workload_client
+ infrastructure_provisioning: workload_client_arm.2023-04
infrastructure_provisioning_release: 2022-11
workload_setup: 2022-11
platform: linux
project_dir: *project_dir
storageEngine: wiredTiger
compile_variant: "-arm64"
- client_compile_variant: "" # Explicitly set this. Otherwise it will use the server version
run_on:
- "rhel70-perf-atlas-large"
depends_on:
@@ -1828,7 +1936,7 @@ buildvariants:
atlas_setup: M60-repl-azure
use_custom_build_azure: true
compile_variant: -rhel70
- infrastructure_provisioning: workload_client
+ infrastructure_provisioning: workload_client_intel.2023-04
infrastructure_provisioning_release: 2022-11
workload_setup: 2022-11
platform: linux
@@ -1866,7 +1974,9 @@ buildvariants:
- name: linux-standalone.2022-11
display_name: Linux Standalone 2022-11
- cron: "0 0 * * 2,4,6" # Tuesday, Thursday and Saturday at 00:00
+ # TODO SERVER-74399 Reduce frequency back to baseline.
+ # cron: &linux-standalone-cron "0 0 * * 2,4,6" # Tuesday, Thursday and Saturday at 00:00
+ cron: &linux-standalone-cron "0 0 * * *" # Every day at 00:00
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1958,8 +2068,7 @@ buildvariants:
- name: linux-standalone-classic-query-engine.2022-11
display_name: Linux Standalone (Classic Query Engine) 2022-11
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1979,8 +2088,7 @@ buildvariants:
- name: linux-standalone-sbe.2022-11
display_name: Linux Standalone (SBE) 2022-11
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -1998,14 +2106,13 @@ buildvariants:
depends_on: *_compile_amazon_linux2_arm64
tasks: *classic_engine_tasks
- - name: linux-1-node-replSet-classic-query-engine.2022-11
- display_name: Linux 1-Node ReplSet (Classic Query Engine) 2022-11
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
+ - name: linux-standalone-sampling-bonsai.2022-11
+ display_name: Linux Standalone (Bonsai with Sampling CE) 2022-11
cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: single-replica-classic-query-engine
+ mongodb_setup: standalone-sampling-bonsai
infrastructure_provisioning_release: 2022-11
infrastructure_provisioning: single
workload_setup: 2022-11
@@ -2017,16 +2124,34 @@ buildvariants:
run_on:
- "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
- tasks: *classic_engine_1nodereplset_tasks
+ tasks: *classic_engine_tasks
- - name: linux-1-node-replSet-sbe.2022-11
- display_name: Linux 1-Node ReplSet (SBE) 2022-11
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
+ - name: linux-intel-standalone-sampling-bonsai
+ display_name: Linux Intel Standalone (Bonsai with Sampling CE)
cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: single-replica-sbe
+ mongodb_setup: standalone-sampling-bonsai
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single-intel
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon2
+ tasks: *classic_engine_tasks
+
+ - name: linux-standalone-heuristic-bonsai.2022-11
+ display_name: Linux Standalone (Bonsai with Heuristic CE) 2022-11
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: standalone-heuristic-bonsai
infrastructure_provisioning_release: 2022-11
infrastructure_provisioning: single
workload_setup: 2022-11
@@ -2038,17 +2163,53 @@ buildvariants:
run_on:
- "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
+ tasks: *classic_engine_tasks
+
+ - name: linux-intel-standalone-heuristic-bonsai
+ display_name: Linux Intel Standalone (Bonsai with Heuristic CE)
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: standalone-heuristic-bonsai
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single-intel
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon2
+ tasks: *classic_engine_tasks
+
+ - name: linux-intel-1-node-replSet-sampling-bonsai
+ display_name: Linux Intel 1-Node ReplSet (Bonsai with Sampling CE)
+ cron: *linux-1-node-repl-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: single-replica-sampling-bonsai
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single-intel
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon2
tasks: *classic_engine_1nodereplset_tasks
- - name: linux-standalone-telemetry
- display_name: Linux Standalone 2022-11 (Telemetry)
- # Run it twice a day.
- # Will make it less frequent when perf is finished.
- cron: "0 3,15 * * 0,1,2,3,4,5,6"
+ - name: linux-1-node-replSet-sampling-bonsai.2022-11
+ display_name: Linux 1-Node ReplSet (Bonsai with Sampling CE) 2022-11
+ cron: *linux-1-node-repl-cron
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: standalone-telemetry
+ mongodb_setup: single-replica-sampling-bonsai
infrastructure_provisioning_release: 2022-11
infrastructure_provisioning: single
workload_setup: 2022-11
@@ -2060,23 +2221,34 @@ buildvariants:
run_on:
- "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
- tasks:
- - name: schedule_patch_auto_tasks
- - name: schedule_variant_auto_tasks
- - name: ycsb_60GB
- - name: ycsb_60GB.long
- - name: crud_workloads_majority
- - name: tpcc
+ tasks: *classic_engine_1nodereplset_tasks
+
+ - name: linux-intel-1-node-replSet-heuristic-bonsai
+ display_name: Linux Intel 1-Node ReplSet (Bonsai with Heuristic CE)
+ cron: *linux-1-node-repl-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: single-replica-heuristic-bonsai
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single-intel
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon2
+ tasks: *classic_engine_1nodereplset_tasks
- - name: linux-1-node-replSet-telemetry
- display_name: Linux 1-Node ReplSet 2022-11 (Telemetry)
- # Run it twice a day.
- # Will make it less frequent when perf is finished.
- cron: "0 3,15 * * 0,1,2,3,4,5,6"
+ - name: linux-1-node-replSet-heuristic-bonsai.2022-11
+ display_name: Linux 1-Node ReplSet (Bonsai with Heuristic CE) 2022-11
+ cron: *linux-1-node-repl-cron
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: single-replica-telemetry
+ mongodb_setup: single-replica-heuristic-bonsai
infrastructure_provisioning_release: 2022-11
infrastructure_provisioning: single
workload_setup: 2022-11
@@ -2088,26 +2260,17 @@ buildvariants:
run_on:
- "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
- tasks:
- - name: schedule_patch_auto_tasks
- - name: schedule_variant_auto_tasks
- - name: ycsb_60GB
- - name: ycsb_60GB.long
- - name: crud_workloads_majority
- - name: mixed_workloads
- - name: tpcc
+ tasks: *classic_engine_1nodereplset_tasks
- - name: linux-3-node-replSet-telemetry
- display_name: Linux 3-Node ReplSet 2022-11 (Telemetry)
- # Run it twice a day.
- # Will make it less frequent when perf is finished.
- cron: "0 3,15 * * 0,1,2,3,4,5,6"
+ - name: linux-1-node-replSet-classic-query-engine.2022-11
+ display_name: Linux 1-Node ReplSet (Classic Query Engine) 2022-11
+ cron: "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: replica-telemetry
+ mongodb_setup: single-replica-classic-query-engine
infrastructure_provisioning_release: 2022-11
- infrastructure_provisioning: replica
+ infrastructure_provisioning: single
workload_setup: 2022-11
platform: linux
project_dir: *project_dir
@@ -2115,29 +2278,68 @@ buildvariants:
storageEngine: wiredTiger
compile_variant: "-arm64"
run_on:
- - "rhel70-perf-replset"
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks: *classic_engine_1nodereplset_tasks
+
+ - name: linux-1-node-replSet-sbe.2022-11
+ display_name: Linux 1-Node ReplSet (SBE) 2022-11
+ cron: "0 0 * * 4" # 00:00 on Thursday
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: single-replica-sbe
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks: *classic_engine_1nodereplset_tasks
+
+ - name: linux-standalone-query-stats
+ display_name: Linux Standalone 2022-11 (QueryStats)
+ # Match the baseline non-query-stats cron
+ cron: *linux-standalone-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: standalone-query-stats
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
tasks:
- name: schedule_patch_auto_tasks
- name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
- name: ycsb_60GB
- name: ycsb_60GB.long
- name: crud_workloads_majority
- - name: crud_workloads_w1
- - name: mixed_workloads
- name: tpcc
- - name: linux-3-shard-telemetry
- display_name: Linux 3-Shard Cluster 2022-11 (Telemetry)
- # Run it twice a day.
- # Will make it less frequent when perf is finished.
- cron: "0 3,15 * * 0,1,2,3,4,5,6"
+ - name: linux-standalone-limited-query-stats
+ display_name: Linux Standalone 2022-11 (Rate Limited QueryStats)
+ # Match the baseline non-query-stats cron
+ cron: *linux-standalone-cron
modules: *modules
expansions:
mongodb_setup_release: 2022-11
- mongodb_setup: shard-telemetry
+ mongodb_setup: standalone-query-stats-small-rate-limit
infrastructure_provisioning_release: 2022-11
- infrastructure_provisioning: shard
+ infrastructure_provisioning: single
workload_setup: 2022-11
platform: linux
project_dir: *project_dir
@@ -2145,14 +2347,11 @@ buildvariants:
storageEngine: wiredTiger
compile_variant: "-arm64"
run_on:
- - "rhel70-perf-shard"
+ - "rhel70-perf-single"
depends_on: *_compile_amazon_linux2_arm64
tasks:
- name: schedule_patch_auto_tasks
- name: schedule_variant_auto_tasks
- - name: crud_workloads_majority
- - name: crud_workloads_w1
- - name: mixed_workloads
- name: linux-1-node-replSet-all-feature-flags.2022-11
display_name: Linux 1-Node ReplSet (all feature flags) 2022-11
@@ -2180,7 +2379,7 @@ buildvariants:
- name: linux-1-node-replSet.2022-11
display_name: Linux 1-Node ReplSet 2022-11
- cron: "0 0 * * 4" # 00:00 on Thursday
+ cron: *linux-1-node-repl-cron
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -2213,7 +2412,6 @@ buildvariants:
- name: bestbuy_agg_merge_same_db
- name: bestbuy_agg_merge_wordcount
- name: bestbuy_query
- - name: change_streams_throughput
- name: change_streams_latency
- name: change_streams_listen_throughput
- name: snapshot_reads
@@ -2295,6 +2493,36 @@ buildvariants:
depends_on: *_compile_amazon_linux2_arm64
tasks: *audit-tasks
+ - name: linux-1-node-replSet-query-stats
+ display_name: Linux 1-Node ReplSet 2022-11 (QueryStats)
+ cron: *linux-1-node-repl-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: single-replica-query-stats
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: single
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-single"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
+ - name: ycsb_60GB
+ - name: ycsb_60GB.long
+ - name: crud_workloads_majority
+ - name: mixed_workloads
+ - name: tpcc
+ - name: linkbench
+ - name: linkbench2
+
- name: linux-shard-lite-fle.2022-11
display_name: Linux Shard Lite FLE 2022-11
cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday
@@ -2471,7 +2699,7 @@ buildvariants:
- name: linux-3-shard.2022-11
display_name: Linux 3-Shard Cluster 2022-11
- cron: "0 0 * * 4" # 00:00 on Thursday
+ cron: &linux-3-shard-cron "0 0 * * 4" # 00:00 on Thursday
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -2487,7 +2715,7 @@ buildvariants:
run_on:
- "rhel70-perf-shard"
depends_on: *_compile_amazon_linux2_arm64
- tasks:
+ tasks: &linux_3_shard_tasks
- name: schedule_patch_auto_tasks
- name: schedule_variant_auto_tasks
- name: industry_benchmarks
@@ -2500,7 +2728,6 @@ buildvariants:
- name: smoke_test
- name: mongos_workloads
- name: mongos_large_catalog_workloads
- - name: change_streams_throughput
- name: change_streams_latency
- name: change_streams_listen_throughput
- name: change_streams_multi_mongos
@@ -2509,6 +2736,52 @@ buildvariants:
- name: tsbs_query_sharded_balancer
- name: tsbs_query_finance_sharded_balancer
+ - name: linux-3-shard-query-stats
+ display_name: Linux 3-Shard Cluster 2022-11 (QueryStats)
+ cron: *linux-3-shard-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: shard-query-stats
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: shard
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-shard"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
+ - name: crud_workloads_majority
+ - name: crud_workloads_w1
+ - name: mixed_workloads
+
+ - name: linux-3-shard-heuristic-bonsai.2022-11
+ display_name: Linux 3-Shard Cluster 2022-11 (Bonsai with Heuristic CE)
+ cron: *linux-3-shard-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: shard-heuristic-bonsai
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: shard
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-shard"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks: *linux_3_shard_tasks
+
- name: linux-shard-lite-audit.2022-11
display_name: Linux Shard Lite Cluster Audit 2022-11
cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday
@@ -2559,7 +2832,6 @@ buildvariants:
- name: bestbuy_query
- name: change_streams_latency
- name: change_streams_preimage_latency
- - name: change_streams_throughput
- name: change_streams_preimage_throughput
- name: change_streams_listen_throughput
- name: industry_benchmarks
@@ -2679,7 +2951,7 @@ buildvariants:
- name: linux-3-node-replSet.2022-11
display_name: Linux 3-Node ReplSet 2022-11
- cron: "0 0 * * 1,2,3,4,5,6" # Everyday except Sunday at 00:00
+ cron: &linux-3-node-cron "0 0 * * 1,2,3,4,5,6" # Everyday except Sunday at 00:00
modules: *modules
expansions:
mongodb_setup_release: 2022-11
@@ -2717,7 +2989,6 @@ buildvariants:
- name: bestbuy_agg_merge_same_db
- name: bestbuy_agg_merge_wordcount
- name: bestbuy_query
- - name: change_streams_throughput
- name: change_streams_preimage_throughput
- name: change_streams_latency
- name: change_streams_preimage_latency
@@ -2807,7 +3078,6 @@ buildvariants:
- name: bestbuy_agg_merge_same_db
- name: bestbuy_agg_merge_wordcount
- name: bestbuy_query
- - name: change_streams_throughput
- name: change_streams_latency
- name: change_streams_listen_throughput
- name: snapshot_reads
@@ -2866,7 +3136,6 @@ buildvariants:
- name: bestbuy_agg_merge_same_db
- name: bestbuy_agg_merge_wordcount
- name: bestbuy_query
- - name: change_streams_throughput
- name: change_streams_latency
- name: change_streams_listen_throughput
- name: snapshot_reads
@@ -2925,7 +3194,6 @@ buildvariants:
- name: bestbuy_agg_merge_same_db
- name: bestbuy_agg_merge_wordcount
- name: bestbuy_query
- - name: change_streams_throughput
- name: change_streams_latency
- name: change_streams_listen_throughput
- name: change_streams_preimage_throughput
@@ -3027,6 +3295,28 @@ buildvariants:
- name: initialsync-large
- name: initialsync-large-fcbis
+ - name: linux-3-node-replSet-cpu-cycle-metrics.2023-06
+ display_name: Linux 3-Node ReplSet CPU Cycle Metrics 2023-06
+ cron: "0 0 * * 4" # 00:00 on Thursday
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: replica-ipc-counters.2023-06
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: replica
+ workload_setup: 2022-11
+ platform: linux
+ authentication: disabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ project_dir: *project_dir
+ depends_on: *_compile_amazon_linux2_arm64
+ run_on:
+ - "rhel70-perf-replset"
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+
- name: linux-replSet-initialsync-logkeeper.2022-11
display_name: Linux ReplSet Initial Sync LogKeeper 2022-11
cron: "0 0 * * 4" # 00:00 on Thursday
@@ -3038,7 +3328,7 @@ buildvariants:
infrastructure_provisioning: initialsync-logkeeper
workload_setup: 2022-11
# EBS logkeeper snapshot with FCV set to 6.0
- snapshotId: snap-0e28e73fe0f1c503a
+ snapshotId: snap-0eca13ca4935455a2
platform: linux
authentication: disabled
storageEngine: wiredTiger
@@ -3065,7 +3355,7 @@ buildvariants:
# infrastructure_provisioning_release: 2022-11
# infrastructure_provisioning: initialsync-logkeeper-snapshot-update
# # Update this to latest snapshot after each LTS release.
- # snapshotId: snap-0e28e73fe0f1c503a
+ # snapshotId: snap-0eca13ca4935455a2
# platform: linux
# authentication: disabled
# storageEngine: wiredTiger
@@ -3124,10 +3414,95 @@ buildvariants:
- name: schedule_patch_auto_tasks
- name: schedule_variant_auto_tasks
+ - name: linux-3-node-replSet-query-stats
+ display_name: Linux 3-Node ReplSet 2022-11 (QueryStats)
+ cron: *linux-3-node-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: replica-query-stats
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: replica
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-replset"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: industry_benchmarks
+ - name: ycsb_60GB
+ - name: ycsb_60GB.long
+ - name: crud_workloads_majority
+ - name: crud_workloads_w1
+ - name: mixed_workloads
+ - name: tpcc
+ - name: linkbench
+ - name: linkbench2
+
+ - name: linux-3-node-replSet-disable-execution-control
+ display_name: Linux 3-Node ReplSet 2022-11 (Execution Control Off)
+ cron: *linux-3-node-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: replica-disable-execution-control
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: replica
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-replset"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+ - name: bestbuy_agg_merge_wordcount
+ - name: industry_benchmarks
+ - name: linkbench2
+ - name: misc_workloads
+ - name: mixed_workloads
+ - name: snapshot_reads
+ - name: tpcc
+ - name: ycsb_60GB
+
+ - name: linux-3-node-replSet-250mbwtcache.2023-05
+ display_name: Linux 3-Node ReplSet 250 MB WiredTiger Cache 2023-05
+ cron: *linux-3-node-cron
+ modules: *modules
+ expansions:
+ mongodb_setup_release: 2022-11
+ mongodb_setup: replica-250mbwtcache.2023-05
+ infrastructure_provisioning_release: 2022-11
+ infrastructure_provisioning: replica
+ workload_setup: 2022-11
+ platform: linux
+ project_dir: *project_dir
+ authentication: enabled
+ storageEngine: wiredTiger
+ compile_variant: "-arm64"
+ run_on:
+ - "rhel70-perf-replset"
+ depends_on: *_compile_amazon_linux2_arm64
+ tasks:
+ - name: schedule_patch_auto_tasks
+ - name: schedule_variant_auto_tasks
+
- &linux-microbenchmarks-standalone-arm
name: linux-microbenchmarks-standalone-arm.2023-01
display_name: MicroBenchmarks Arm Standalone inMemory.2023-01
- cron: "0 */4 * * *" # Every 4 hours starting at midnight
+ # TODO SERVER-74399 Reduce frequency back to baseline.
+ # cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: &linux-microbench-cron "0 0 * * *" # Everyday at 00:00
modules: *modules
expansions: &standalone-arm-expansions
mongodb_setup_release: 2022-11
@@ -3142,7 +3517,7 @@ buildvariants:
project_dir: *project_dir
compile_variant: "-arm64"
run_on:
- - "rhel70-perf-single"
+ - "rhel70-perf-microbenchmarks"
depends_on: *_compile_amazon_linux2_arm64
tasks:
- name: big_collection
@@ -3173,10 +3548,56 @@ buildvariants:
- name: compound_wildcard_index_write_commands
- name: compound_wildcard_index_read_commands
+# Specialized temporary variants to better understand the impact of query stats.
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: fast-query-microbenchmarks-baseline.2023-06
+ display_name: Fast Query Microbenchmarks (Baseline)
+    # TODO SERVER-78295 Fold these tasks into the regular variants on the normal schedule.
+ cron: &fast-query-cron 0 */4 * * * # Every 4 hours, temporarily.
+ tasks:
+ - name: fast_running_queries
+ - name: fast_running_queries_large_dataset
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: fast-query-microbenchmarks-query-stats.2023-06
+ display_name: Fast Query Microbenchmarks (QueryStats)
+ cron: *fast-query-cron
+ tasks:
+ - name: fast_running_queries
+ - name: fast_running_queries_large_dataset
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-query-stats
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: fast-query-microbenchmarks-limited-query-stats.2023-06
+ display_name: Fast Query Microbenchmarks (Limited QueryStats)
+ cron: *fast-query-cron
+ tasks:
+ - name: fast_running_queries
+ - name: fast_running_queries_large_dataset
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-query-stats-small-rate-limit
+
+# Variant: Microbenchmarks with QueryStats
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: linux-microbenchmarks-standalone-arm-query-stats.2023-01
+ display_name: MicroBenchmarks Arm Standalone inMemory.2023-01 (QueryStats)
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-query-stats
+
+# Variant: Microbenchmarks with rate limited QueryStats
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: linux-microbenchmarks-standalone-arm-limited-query-stats.2023-01
+    display_name: MicroBenchmarks (Rate Limited QueryStats)
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-query-stats-small-rate-limit
+
- &linux-microbenchmarks-repl-arm
name: linux-microbenchmarks-repl-arm.2023-01
display_name: MicroBenchmarks Arm 1-Node ReplSet inMemory.2023-01
- cron: "0 */4 * * *" # Every 4 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
modules: *modules
expansions: &repl-arm-expansions
mongodb_setup_release: 2022-11
@@ -3191,7 +3612,7 @@ buildvariants:
project_dir: *project_dir
compile_variant: "-arm64"
run_on:
- - "rhel70-perf-single"
+ - "rhel70-perf-microbenchmarks"
depends_on: *_compile_amazon_linux2_arm64
tasks:
- name: genny_scale_InsertRemove
@@ -3205,7 +3626,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-arm
name: linux-microbenchmarks-standalone-all-feature-flags-arm.2023-01
display_name: MicroBenchmarks Arm Standalone inMemory (all feature flags).2023-01
- cron: "0 */4 * * *" # Every 4 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions:
<<: *standalone-arm-expansions
mongodb_setup: mongo-perf-standalone-all-feature-flags.2023-02
@@ -3242,8 +3663,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-arm
name: linux-microbenchmarks-standalone-classic-query-engine-arm.2023-01
display_name: MicroBenchmarks Arm Standalone inMemory (Classic Query Engine).2023-01
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
expansions:
<<: *standalone-arm-expansions
mongodb_setup: mongo-perf-standalone-classic-query-engine.2023-02
@@ -3254,8 +3674,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-arm
name: linux-microbenchmarks-standalone-sbe-arm.2023-01
display_name: MicroBenchmarks Arm Standalone inMemory (SBE).2023-01
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
expansions:
<<: *standalone-arm-expansions
mongodb_setup: mongo-perf-standalone-sbe.2023-02
@@ -3263,10 +3682,32 @@ buildvariants:
# Add tasks to the anchor that this variant references
# If diverging from that list, add the entire list of desired tasks here
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: linux-microbenchmarks-standalone-sampling-bonsai-arm.2023-01
+ display_name: MicroBenchmarks Arm Standalone inMemory (Bonsai with Sampling CE).2023-01
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-sampling-bonsai.2023-02
+ # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
+ # Add tasks to the anchor that this variant references
+ # If diverging from that list, add the entire list of desired tasks here
+
+ - <<: *linux-microbenchmarks-standalone-arm
+ name: linux-microbenchmarks-standalone-heuristic-bonsai-arm.2023-01
+ display_name: MicroBenchmarks Arm Standalone inMemory (Bonsai with Heuristic CE).2023-01
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ expansions:
+ <<: *standalone-arm-expansions
+ mongodb_setup: mongo-perf-standalone-heuristic-bonsai.2023-02
+ # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
+ # Add tasks to the anchor that this variant references
+ # If diverging from that list, add the entire list of desired tasks here
+
- <<: *linux-microbenchmarks-repl-arm
name: linux-microbenchmarks-repl-all-feature-flags-arm.2023-01
display_name: MicroBenchmarks Arm 1-Node ReplSet inMemory (all feature flags).2023-01
- cron: "0 */4 * * *" # Every 4 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions:
<<: *repl-arm-expansions
mongodb_setup: mongo-perf-replica-all-feature-flags.2023-02
@@ -3278,13 +3719,13 @@ buildvariants:
<<: *linux-microbenchmarks-standalone-arm
name: linux-microbenchmarks-standalone-intel.2023-01
display_name: MicroBenchmarks Intel Standalone inMemory.2023-01
- cron: "0 */12 * * *" # Every 12 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions: &standalone-intel-expansions
<<: *standalone-arm-expansions
infrastructure_provisioning: workload_client_mongod_combined_intel.2023-01
compile_variant: ""
run_on:
- - "rhel70-perf-single"
+ - "rhel70-perf-microbenchmarks"
depends_on: *_compile_amazon2
# yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
# Add tasks to the anchor that this variant references
@@ -3294,13 +3735,13 @@ buildvariants:
<<: *linux-microbenchmarks-repl-arm
name: linux-microbenchmarks-repl-intel.2023-01
display_name: MicroBenchmarks Intel 1-Node ReplSet inMemory.2023-01
- cron: "0 */12 * * *" # Every 12 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions: &repl-intel-expansions
<<: *repl-arm-expansions
infrastructure_provisioning: workload_client_mongod_combined_intel.2023-01
compile_variant: ""
run_on:
- - "rhel70-perf-single"
+ - "rhel70-perf-microbenchmarks"
depends_on: *_compile_amazon2
# yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
# Add tasks to the anchor that this variant references
@@ -3309,7 +3750,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-intel
name: linux-microbenchmarks-standalone-all-feature-flags.2023-01
display_name: MicroBenchmarks Intel Standalone inMemory (all feature flags).2023-01
- cron: "0 */12 * * *" # Every 12 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions:
<<: *standalone-intel-expansions
mongodb_setup: mongo-perf-standalone-all-feature-flags.2023-02
@@ -3321,8 +3762,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-intel
name: linux-microbenchmarks-standalone-classic-query-engine.2023-01
display_name: MicroBenchmarks Intel Standalone inMemory (Classic Query Engine).2023-01
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
expansions:
<<: *standalone-intel-expansions
mongodb_setup: mongo-perf-standalone-classic-query-engine.2023-02
@@ -3333,8 +3773,7 @@ buildvariants:
- <<: *linux-microbenchmarks-standalone-intel
name: linux-microbenchmarks-standalone-sbe.2023-01
display_name: MicroBenchmarks Intel Standalone inMemory (SBE).2023-01
- # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799).
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ cron: "0 0 * * 4" # 00:00 on Thursday
expansions:
<<: *standalone-intel-expansions
mongodb_setup: mongo-perf-standalone-sbe.2023-02
@@ -3342,10 +3781,32 @@ buildvariants:
# Add tasks to the anchor that this variant references
# If diverging from that list, add the entire list of desired tasks here
+ - <<: *linux-microbenchmarks-standalone-intel
+ name: linux-microbenchmarks-standalone-sampling-bonsai.2023-01
+ display_name: MicroBenchmarks Intel Standalone inMemory (Bonsai with Sampling CE).2023-01
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ expansions:
+ <<: *standalone-intel-expansions
+ mongodb_setup: mongo-perf-standalone-sampling-bonsai.2023-02
+ # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
+ # Add tasks to the anchor that this variant references
+ # If diverging from that list, add the entire list of desired tasks here
+
+ - <<: *linux-microbenchmarks-standalone-intel
+ name: linux-microbenchmarks-standalone-heuristic-bonsai.2023-01
+ display_name: MicroBenchmarks Intel Standalone inMemory (Bonsai with Heuristic CE).2023-01
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
+ expansions:
+ <<: *standalone-intel-expansions
+ mongodb_setup: mongo-perf-standalone-heuristic-bonsai.2023-02
+ # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE.
+ # Add tasks to the anchor that this variant references
+ # If diverging from that list, add the entire list of desired tasks here
+
- <<: *linux-microbenchmarks-repl-intel
name: linux-microbenchmarks-repl-all-feature-flags.2023-01
display_name: MicroBenchmarks Intel 1-Node ReplSet inMemory (all feature flags).2023-01
- cron: "0 */12 * * *" # Every 12 hours starting at midnight
+ cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
expansions:
<<: *repl-intel-expansions
mongodb_setup: mongo-perf-replica-all-feature-flags.2023-02
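A note on the "DO NOT ADD INDIVIDUAL TASKS HERE" comments in the microbenchmark variants above: YAML merge keys (<<: *anchor) only merge mapping entries, so a tasks list declared directly on a variant replaces the anchored list outright rather than being appended to it. A minimal sketch of that behavior, assuming PyYAML is available; the variant and task names are made up:

import yaml  # PyYAML; assumed available for this sketch only

doc = """
base: &base
  cron: "0 0 * * 4"
  tasks: [industry_benchmarks, mixed_workloads]
variant:
  <<: *base
  tasks: [crud_workloads_majority]
"""
data = yaml.safe_load(doc)
# Scalar keys are inherited through the merge key...
assert data["variant"]["cron"] == "0 0 * * 4"
# ...but a list defined on the variant replaces the anchored list wholesale.
assert data["variant"]["tasks"] == ["crud_workloads_majority"]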
diff --git a/etc/third_party_components.yml b/etc/third_party_components.yml
index 8ef3575924b6b..cf88f09c00176 100644
--- a/etc/third_party_components.yml
+++ b/etc/third_party_components.yml
@@ -103,6 +103,13 @@ components:
local_directory_path: src/third_party/SafeInt
team_owner: "Service Architecture"
+ derickr/timelib:
+ homepage_url: https://github.com/derickr/timelib
+ open_hub_url: N/A
+ release_monitoring_id: -1
+ local_directory_path: src/third_party/timelib
+ team_owner: "Query"
+
discover-python:
homepage_url: https://pypi.org/project/discover/
open_hub_url: N/A
@@ -248,13 +255,6 @@ components:
team_owner: "Query"
upgrade_suppression: TODO SERVER-64574
- "mpark-variant-devel":
- homepage_url: https://github.com/mpark/variant
- open_hub_url: N/A
- release_monitoring_id: 18301
- local_directory_path: src/third_party/variant-1.4.0
- team_owner: "Service Architecture"
-
nlohmann.json.decomposed:
homepage_url: https://github.com/nlohmann/json
open_hub_url: https://www.openhub.net/p/nlohmann_json
@@ -359,14 +359,6 @@ components:
is_test_only: true
team_owner: "Wiredtiger"
- timelib:
- homepage_url: https://github.com/derickr/timelib
- open_hub_url: N/A
- release_monitoring_id: -1
- local_directory_path: src/third_party/timelib
- team_owner: "Query"
- # Note: Not in Black Duck
-
unicode:
homepage_url: http://www.unicode.org
open_hub_url: N/A
diff --git a/etc/tsan.suppressions b/etc/tsan.suppressions
index b9fd2d6960385..323d4383a8a6c 100644
--- a/etc/tsan.suppressions
+++ b/etc/tsan.suppressions
@@ -17,7 +17,7 @@ race:tzset_internal
# that false positives are more likely, we're deferring them until we have
# fixed the ones we know are real.
# TODO: https://jira.mongodb.org/browse/SERVER-48599
-race:src/third_party/wiredtiger/*
+called_from_lib:libwiredtiger.so
# These functions call malloc() down the line while inside a signal handler.
# Since we've never had problems with any of the allocators we use, and since
diff --git a/evergreen/build_metric_cedar_report.py b/evergreen/build_metric_cedar_report.py
index 6c89e733ccb6f..25b1356df8c39 100644
--- a/evergreen/build_metric_cedar_report.py
+++ b/evergreen/build_metric_cedar_report.py
@@ -16,65 +16,82 @@
pull_cache_metrics_json = args.cache_pull_metrics
cedar_report = []
+
def single_metric_test(test_name, metric_name, value):
return {
- "info": {
- "test_name": test_name,
- },
- "metrics": [
- {
- "name": metric_name,
- "value": round(value, 2)
- },
- ]
+ "info": {"test_name": test_name, },
+ "metrics": [{"name": metric_name, "value": round(value, 2)}, ]
}
+
with open(clean_build_metrics_json) as f:
aggregated_build_tasks = {}
build_metrics = json.load(f)
for task in build_metrics['build_tasks']:
- outputs_key = ' '.join(task['outputs'])
- if outputs_key in aggregated_build_tasks:
- aggregated_build_tasks[outputs_key]['mem_usage'] += task['mem_usage']
- aggregated_build_tasks[outputs_key]['time'] += (task['end_time'] - task['start_time'])
- else:
- aggregated_build_tasks[outputs_key] = {
- 'mem_usage': task['mem_usage'],
- 'time': task['end_time'] - task['start_time'],
- }
+ if task['builder'] in [
+ 'SharedLibrary',
+ 'StaticLibrary',
+ 'Program',
+ 'Object',
+ 'SharedObject',
+ 'StaticObject',
+ ]:
+ outputs_key = ' '.join(task['outputs'])
+ if outputs_key in aggregated_build_tasks:
+ if aggregated_build_tasks[outputs_key]['mem_usage'] < task['mem_usage']:
+ aggregated_build_tasks[outputs_key]['mem_usage'] = task['mem_usage']
+ aggregated_build_tasks[outputs_key]['time'] += (
+ task['end_time'] - task['start_time'])
+ else:
+ aggregated_build_tasks[outputs_key] = {
+ 'mem_usage': task['mem_usage'],
+ 'time': task['end_time'] - task['start_time'],
+ }
for output_files in aggregated_build_tasks:
cedar_report.append({
- "info": {
- "test_name": output_files,
- },
- "metrics": [
+ "info": {"test_name": output_files, }, "metrics": [
{
- "name": "seconds",
- "value": round(aggregated_build_tasks[output_files]['time'] / (10.0**9.0), 2)
+ "name": "seconds", "value": round(
+ aggregated_build_tasks[output_files]['time'] / (10.0**9.0), 2)
},
{
- "name": "MBs",
- "value": round(aggregated_build_tasks[output_files]['mem_usage'] / 1024.0 / 1024.0, 2)
+ "name":
+ "MBs", "value":
+ round(
+ aggregated_build_tasks[output_files]['mem_usage'] / 1024.0 / 1024.0,
+ 2)
},
]
})
try:
- cedar_report.append(single_metric_test("SCons memory usage", "MBs", build_metrics['scons_metrics']['memory']['post_build'] / 1024.0 / 1024.0))
+ cedar_report.append(
+ single_metric_test(
+ "SCons memory usage", "MBs",
+ build_metrics['scons_metrics']['memory']['post_build'] / 1024.0 / 1024.0))
except KeyError:
if sys.platform == 'darwin':
# MacOS has known memory reporting issues, although this is not directly related to scons which does not use
# psutil for this case, I think both use underlying OS calls to determine the memory: https://github.com/giampaolo/psutil/issues/1908
pass
-
- cedar_report.append(single_metric_test("System Memory Peak", "MBs", build_metrics['system_memory']['max'] / 1024.0 / 1024.0))
- cedar_report.append(single_metric_test("Total Build time", "seconds", build_metrics['scons_metrics']['time']['total']))
- cedar_report.append(single_metric_test("Total Build output size", "MBs", build_metrics['artifact_metrics']['total_artifact_size'] / 1024.0 / 1024.0))
+
+ cedar_report.append(
+ single_metric_test("System Memory Peak", "MBs",
+ build_metrics['system_memory']['max'] / 1024.0 / 1024.0))
+ cedar_report.append(
+ single_metric_test("Total Build time", "seconds",
+ build_metrics['scons_metrics']['time']['total']))
+ cedar_report.append(
+ single_metric_test(
+ "Total Build output size", "MBs",
+ build_metrics['artifact_metrics']['total_artifact_size'] / 1024.0 / 1024.0))
try:
- cedar_report.append(single_metric_test("Transitive Libdeps Edges", "edges", build_metrics['libdeps_metrics']['TRANS_EDGE']))
+ cedar_report.append(
+ single_metric_test("Transitive Libdeps Edges", "edges",
+ build_metrics['libdeps_metrics']['TRANS_EDGE']))
except KeyError:
pass
@@ -87,20 +104,17 @@ def single_metric_test(test_name, metric_name, value):
break
if mongod_metrics and mongod_metrics.get('bin_metrics'):
- cedar_report.append(single_metric_test("Mongod debug info size", "MBs", mongod_metrics['bin_metrics']['debug']['filesize'] / 1024.0 / 1024.0))
+ cedar_report.append(
+ single_metric_test(
+ "Mongod debug info size", "MBs",
+ mongod_metrics['bin_metrics']['debug']['filesize'] / 1024.0 / 1024.0))
with open(populate_cache_metrics_json) as f:
build_metrics = json.load(f)
cedar_report.append({
- "info": {
- "test_name": "cache_push_time",
- },
- "metrics": [
- {
- "name": "seconds",
- "value": build_metrics["cache_metrics"]['push_time'] / (10.0**9.0)
- },
+ "info": {"test_name": "cache_push_time", }, "metrics": [
+ {"name": "seconds", "value": build_metrics["cache_metrics"]['push_time'] / (10.0**9.0)},
]
})
@@ -108,18 +122,12 @@ def single_metric_test(test_name, metric_name, value):
build_metrics = json.load(f)
cedar_report.append({
- "info": {
- "test_name": "cache_pull_time",
- },
- "metrics": [
- {
- "name": "seconds",
- "value": build_metrics["cache_metrics"]['pull_time'] / (10.0**9.0)
- },
+ "info": {"test_name": "cache_pull_time", }, "metrics": [
+ {"name": "seconds", "value": build_metrics["cache_metrics"]['pull_time'] / (10.0**9.0)},
]
})
+print(f"Generated Cedar Report with {len(cedar_report)} perf results.")
+
with open("build_metrics_cedar_report.json", "w") as fh:
json.dump(cedar_report, fh)
-
-
diff --git a/evergreen/compiled_binaries_get.sh b/evergreen/compiled_binaries_get.sh
index 6c0f8d751467a..0a03489eafd9b 100755
--- a/evergreen/compiled_binaries_get.sh
+++ b/evergreen/compiled_binaries_get.sh
@@ -8,7 +8,7 @@ set -o verbose
# activate_venv will make sure we are using python 3
activate_venv
-setup_db_contrib_tool_venv
+setup_db_contrib_tool
rm -rf /data/install /data/multiversion
@@ -16,26 +16,6 @@ edition="${multiversion_edition}"
platform="${multiversion_platform}"
architecture="${multiversion_architecture}"
-if [ ! -z "${multiversion_edition_42_or_later}" ]; then
- edition="${multiversion_edition_42_or_later}"
-fi
-if [ ! -z "${multiversion_platform_42_or_later}" ]; then
- platform="${multiversion_platform_42_or_later}"
-fi
-if [ ! -z "${multiversion_architecture_42_or_later}" ]; then
- architecture="${multiversion_architecture_42_or_later}"
-fi
-
-if [ ! -z "${multiversion_edition_44_or_later}" ]; then
- edition="${multiversion_edition_44_or_later}"
-fi
-if [ ! -z "${multiversion_platform_44_or_later}" ]; then
- platform="${multiversion_platform_44_or_later}"
-fi
-if [ ! -z "${multiversion_architecture_44_or_later}" ]; then
- architecture="${multiversion_architecture_44_or_later}"
-fi
-
version=${project#mongodb-mongo-}
version=${version#v}
diff --git a/evergreen/external_auth_oidc_setup.sh b/evergreen/external_auth_oidc_setup.sh
old mode 100644
new mode 100755
index c082304a02230..c49e7caaa41ec
--- a/evergreen/external_auth_oidc_setup.sh
+++ b/evergreen/external_auth_oidc_setup.sh
@@ -8,6 +8,9 @@ set -o errexit
# Should output contents to new file in home directory.
cat << EOF > $HOME/oidc_e2e_setup.json
{
+ "tD548GwE1@outlook.com" : "${oidc_azure_test_user_account_one_secret}",
+ "tD548GwE2@outlook.com" : "${oidc_azure_test_user_account_two_secret}",
+ "tD548GwE3@outlook.com" : "${oidc_azure_test_user_account_three_secret}",
"testserversecurityone@okta-test.com" : "${oidc_okta_test_user_account_one_secret}",
"testserversecuritytwo@okta-test.com" : "${oidc_okta_test_user_account_two_secret}",
"testserversecuritythree@okta-test.com" : "${oidc_okta_test_user_account_three_secret}"
diff --git a/evergreen/failed_unittests_gather.sh b/evergreen/failed_unittests_gather.sh
index 7a3ff6b7a0f9e..72668fe55c35f 100644
--- a/evergreen/failed_unittests_gather.sh
+++ b/evergreen/failed_unittests_gather.sh
@@ -6,7 +6,9 @@ cd src
set -eou pipefail
# Only run on unit test tasks so we don't target mongod binaries from cores.
-if [ "${task_name}" != "run_unittests" ] && [ "${task_name}" != "run_dbtest" ] && [ "${task_name}" != "run_unittests_with_recording" ]; then
+if [ "${task_name}" != "run_unittests" ] && [ "${task_name}" != "run_dbtest" ] \
+ && [ "${task_name}" != "run_unittests_with_recording" ] \
+ && [[ ${task_name} != integration_tests* ]]; then
exit 0
fi
@@ -15,7 +17,7 @@ mkdir -p $unittest_bin_dir || true
# Find all core files
core_files=$(/usr/bin/find -H . \( -name "dump_*.core" -o -name "*.mdmp" \) 2> /dev/null)
-for core_file in $core_files; do
+while read -r core_file; do
# A core file name does not always have the executable name that generated it.
# See http://stackoverflow.com/questions/34801353/core-dump-filename-gets-thread-name-instead-of-executable-name-with-core-pattern
# On platforms with GDB, we get the binary name from core file
@@ -63,7 +65,7 @@ for core_file in $core_files; do
fi
done
-done
+done <<< "${core_files}"
# For recorded tests, use the text file to copy them over instead of relying on core dumps.
has_recorded_failures=""
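As the comments above note, on platforms with GDB the binary name is recovered from the core file itself. A rough sketch of that lookup, assuming gdb is installed and using a hypothetical core file name; the "Core was generated by" banner format can vary between gdb versions:

import re
import subprocess

core_file = "dump_mongod.1234.core"  # hypothetical name matching the find pattern above
out = subprocess.run(["gdb", "-batch", "-c", core_file], capture_output=True, text=True).stdout
match = re.search(r"Core was generated by `([^']+)'", out)
binary_name = match.group(1).split()[0] if match else None  # first word is the executable path
print(binary_name)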
diff --git a/evergreen/functions/added_and_modified_patch_files_get.sh b/evergreen/functions/added_and_modified_patch_files_get.sh
deleted file mode 100755
index 608b53787efdf..0000000000000
--- a/evergreen/functions/added_and_modified_patch_files_get.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-. "$DIR/../prelude.sh"
-
-cd src
-
-set -o verbose
-set -o errexit
-
-git diff --name-only origin/${branch_name}... --line-prefix="${workdir}/src/" --diff-filter=d >> modified_and_created_patch_files.txt
-if [ -d src/mongo/db/modules/enterprise ]; then
- pushd src/mongo/db/modules/enterprise
- git diff HEAD --name-only --line-prefix="${workdir}/src/src/mongo/db/modules/enterprise/" --diff-filter=d >> ~1/modified_and_created_patch_files.txt
- popd
-fi
diff --git a/evergreen/functions/binaries_extract.py b/evergreen/functions/binaries_extract.py
new file mode 100644
index 0000000000000..356c9f15668d2
--- /dev/null
+++ b/evergreen/functions/binaries_extract.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+#
+# Copyright 2020 MongoDB Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+import argparse
+import subprocess
+import os
+import sys
+import pathlib
+import shutil
+import glob
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument('--change-dir', type=str, action='store',
+ help="The directory to change into to perform the extraction.")
+parser.add_argument('--extraction-command', type=str, action='store',
+ help="The command to use for the extraction.")
+parser.add_argument('--tarball', type=str, action='store',
+ help="The tarball to perform the extraction on.")
+parser.add_argument(
+ '--move-output', type=str, action='append', help=
+    "Move an extracted entry to a new location after extraction. Format is colon separated, e.g. '--move-output=file/to/move:path/to/destination'. Can accept glob-like wildcards."
+)
+args = parser.parse_args()
+
+if args.change_dir:
+ working_dir = pathlib.Path(args.change_dir).as_posix()
+ tarball = pathlib.Path(args.tarball).resolve().as_posix()
+    print(f"Switching to {working_dir} to perform the extraction.")
+ os.makedirs(working_dir, exist_ok=True)
+else:
+ working_dir = None
+ tarball = pathlib.Path(args.tarball).as_posix()
+
+shell = os.environ.get('SHELL', '/bin/bash')
+
+if sys.platform == 'win32':
+ proc = subprocess.run(['C:/cygwin/bin/cygpath.exe', '-w', shell], text=True,
+ capture_output=True)
+ bash = pathlib.Path(proc.stdout.strip())
+ cmd = [bash.as_posix(), '-c', f"{args.extraction_command} {tarball}"]
+else:
+ cmd = [shell, '-c', f"{args.extraction_command} {tarball}"]
+
+print(f"Extracting: {' '.join(cmd)}")
+proc = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ cwd=working_dir)
+
+print(proc.stdout)
+
+if args.move_output:
+ for arg in args.move_output:
+ try:
+ src, dst = arg.split(':')
+ print(f"Moving {src} to {dst}...")
+ files_to_move = glob.glob(src, recursive=True)
+ for file in files_to_move:
+ result_dst = shutil.move(file, dst)
+ print(f"Moved {file} to {result_dst}")
+ except ValueError as exc:
+            print(f"Bad format, needs to be glob-like paths in the form 'src:dst', got: {arg}")
+ raise exc
+
+sys.exit(proc.returncode)
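For illustration, a standalone sketch of how a --move-output spec is interpreted; the paths are hypothetical and only demonstrate the colon-separated, glob-capable 'src:dst' format described in the help text above:

import glob
import shutil

spec = "dist-test/bin/*:bin"  # hypothetical spec in the 'src:dst' format
src, dst = spec.split(":")
for entry in glob.glob(src, recursive=True):
    # Each matched entry is moved under ./bin, mirroring the script's behavior.
    print(shutil.move(entry, dst))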
diff --git a/evergreen/functions/binaries_extract.sh b/evergreen/functions/binaries_extract.sh
deleted file mode 100755
index ec7b8dd3f8f06..0000000000000
--- a/evergreen/functions/binaries_extract.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-. "$DIR/../prelude.sh"
-
-cd src
-
-set -o errexit
-${decompress} mongo-binaries.tgz
diff --git a/evergreen/functions/task_timeout_determine.sh b/evergreen/functions/task_timeout_determine.sh
index 49dda4dd4a992..b307586792db2 100644
--- a/evergreen/functions/task_timeout_determine.sh
+++ b/evergreen/functions/task_timeout_determine.sh
@@ -31,8 +31,15 @@ else
evg_alias="evg-alias-absent"
fi
+resmoke_test_flags=""
+if [[ -n "${test_flags}" ]]; then
+ resmoke_test_flags="--test-flags='${test_flags}'"
+fi
+
activate_venv
-PATH=$PATH:$HOME:/ $python buildscripts/evergreen_task_timeout.py $timeout_factor \
+PATH=$PATH:$HOME:/ eval $python buildscripts/evergreen_task_timeout.py \
+ $timeout_factor \
+ $resmoke_test_flags \
--install-dir "${install_dir}" \
--task-name ${task_name} \
--suite-name ${suite_name} \
diff --git a/evergreen/garasign_gpg_crypt_sign.sh b/evergreen/garasign_gpg_crypt_sign.sh
new file mode 100644
index 0000000000000..378e63b138541
--- /dev/null
+++ b/evergreen/garasign_gpg_crypt_sign.sh
@@ -0,0 +1,31 @@
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+. "$DIR/prelude.sh"
+
+cd src
+
+set -o errexit
+set -o verbose
+
+ext="${ext:-tgz}"
+
+crypt_file_name=mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext}
+mv "mongo_crypt_shared_v1.$ext" $crypt_file_name
+
+# generating checksums
+shasum -a 1 $crypt_file_name | tee $crypt_file_name.sha1
+shasum -a 256 $crypt_file_name | tee $crypt_file_name.sha256
+md5sum $crypt_file_name | tee $crypt_file_name.md5
+
+# signing crypt linux artifact with gpg
+cat << EOF >> gpg_signing_commands.sh
+gpgloader # loading gpg keys.
+gpg --yes -v --armor -o $crypt_file_name.sig --detach-sign $crypt_file_name
+EOF
+
+podman run \
+ -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_70} \
+ -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_70} \
+ --rm \
+ -v $(pwd):$(pwd) -w $(pwd) \
+ ${garasign_gpg_image} \
+ /bin/bash -c "$(cat ./gpg_signing_commands.sh)"
diff --git a/evergreen/garasign_gpg_sign.sh b/evergreen/garasign_gpg_sign.sh
index a75a6042118da..27e7f7916f35b 100644
--- a/evergreen/garasign_gpg_sign.sh
+++ b/evergreen/garasign_gpg_sign.sh
@@ -55,8 +55,8 @@ sign mongodb-cryptd-$push_name-$push_arch-$suffix.$ext
EOF
podman run \
- -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username} \
- -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password} \
+ -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_70} \
+ -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_70} \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_gpg_image} \
diff --git a/evergreen/garasign_jsign_sign.sh b/evergreen/garasign_jsign_sign.sh
index 3ae3176cae9a8..474a517631849 100644
--- a/evergreen/garasign_jsign_sign.sh
+++ b/evergreen/garasign_jsign_sign.sh
@@ -9,15 +9,6 @@ cd src
msi_filename=mongodb-${push_name}-${push_arch}-${suffix}.msi
/usr/bin/find build/ -type f | grep msi$ | xargs -I original_filename cp original_filename $msi_filename || true
-# generating checksums
-if [ -e $msi_filename ]; then
- shasum -a 1 $msi_filename | tee $msi_filename.sha1
- shasum -a 256 $msi_filename | tee $msi_filename.sha256
- md5sum $msi_filename | tee $msi_filename.md5
-else
- echo "$msi_filename does not exist. Skipping checksum generation"
-fi
-
# signing windows artifacts with jsign
cat << 'EOF' > jsign_signing_commands.sh
function sign(){
@@ -40,3 +31,12 @@ podman run \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_jsign_image} \
/bin/bash -c "$(cat ./jsign_signing_commands.sh)"
+
+# generating checksums
+if [ -e $msi_filename ]; then
+ shasum -a 1 $msi_filename | tee $msi_filename.sha1
+ shasum -a 256 $msi_filename | tee $msi_filename.sha256
+ md5sum $msi_filename | tee $msi_filename.md5
+else
+ echo "$msi_filename does not exist. Skipping checksum generation"
+fi
diff --git a/evergreen/generate_buildid_debug_symbols_mapping.sh b/evergreen/generate_buildid_debug_symbols_mapping.sh
index d1866e961a555..142614ce821bd 100644
--- a/evergreen/generate_buildid_debug_symbols_mapping.sh
+++ b/evergreen/generate_buildid_debug_symbols_mapping.sh
@@ -6,10 +6,16 @@ cd src
set -o errexit
set -o verbose
+is_san_variant_arg=""
+if [[ -n "${san_options}" ]]; then
+ is_san_variant_arg="--is-san-variant"
+fi
+
activate_venv
$python buildscripts/debugsymb_mapper.py \
--version "${version_id}" \
--client-id "${symbolizer_client_id}" \
--client-secret "${symbolizer_client_secret}" \
- --variant "${build_variant}"
+ --variant "${build_variant}" \
+ $is_san_variant_arg
diff --git a/evergreen/generate_version_burn_in.sh b/evergreen/generate_version_burn_in.sh
index b7d2ff2471463..b92b9fcfd16d7 100644
--- a/evergreen/generate_version_burn_in.sh
+++ b/evergreen/generate_version_burn_in.sh
@@ -8,6 +8,7 @@ set -o verbose
setup_mongo_task_generator
activate_venv
+$python buildscripts/burn_in_tests.py generate-test-membership-map-file-for-ci
PATH=$PATH:$HOME:/ ./mongo-task-generator \
--expansion-file ../expansions.yml \
--evg-auth-file ./.evergreen.yml \
diff --git a/evergreen/jepsen_docker/list-append.sh b/evergreen/jepsen_docker/list-append.sh
index 4378b1bcadc94..2204a6a2e9b24 100644
--- a/evergreen/jepsen_docker/list-append.sh
+++ b/evergreen/jepsen_docker/list-append.sh
@@ -25,7 +25,6 @@ cd src
activate_venv
$python buildscripts/jepsen_report.py --start_time=$start_time --end_time=$end_time --elapsed=$elapsed_secs --emit_status_files --store ./jepsen-mongodb jepsen-mongodb/jepsen_${task_name}_${execution}.log
exit_code=$?
-cat report.json
if [ -f "jepsen_system_fail.txt" ]; then
mv jepsen_system_fail.txt jepsen-mongodb/jepsen_system_failure_${task_name}_${execution}
diff --git a/evergreen/lint_fuzzer_sanity_patch.py b/evergreen/lint_fuzzer_sanity_patch.py
new file mode 100644
index 0000000000000..3047ec0eb99df
--- /dev/null
+++ b/evergreen/lint_fuzzer_sanity_patch.py
@@ -0,0 +1,96 @@
+import os
+import sys
+import shutil
+import subprocess
+import glob
+from concurrent import futures
+from pathlib import Path
+import time
+from typing import List, Tuple
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
+
+# pylint: disable=wrong-import-position
+from buildscripts.linter.filediff import gather_changed_files_for_lint
+from buildscripts import simple_report
+
+# pylint: enable=wrong-import-position
+
+
+def is_js_file(filename: str) -> bool:
+ return (filename.startswith("jstests") or filename.startswith("src/mongo/db/modules/enterprise/jstests")) and filename.endswith(".js")
+
+
+diffed_files = [Path(f) for f in gather_changed_files_for_lint(is_js_file)]
+num_changed_files = len(diffed_files)
+
+if num_changed_files == 0:
+ print("No js files had changes in them. Exiting.")
+ sys.exit(0)
+
+INPUT_DIR = "jstestfuzzinput"
+OUTPUT_DIR = "jstestfuzzoutput"
+os.makedirs(INPUT_DIR, exist_ok=True)
+os.makedirs(OUTPUT_DIR, exist_ok=True)
+
+for file in diffed_files:
+ copy_dest = INPUT_DIR / file
+ os.makedirs(copy_dest.parent, exist_ok=True)
+ shutil.copy(file, copy_dest)
+
+OUTPUT_FULL_DIR = Path(os.getcwd()) / OUTPUT_DIR
+INPUT_FULL_DIR = Path(os.getcwd()) / INPUT_DIR
+
+subprocess.run([
+ "./src/scripts/npm_run.sh", "jstestfuzz", "--", "--jsTestsDir", INPUT_FULL_DIR, "--out",
+ OUTPUT_FULL_DIR, "--numSourceFiles",
+ str(min(num_changed_files, 250)), "--numGeneratedFiles", "250"
+], check=True, cwd="jstestfuzz")
+
+
+def _parse_jsfile(jsfile: Path) -> simple_report.Result:
+ """
+    Attempt to parse the given js file.
+    Return the Result entry that should be added to the report given to Evergreen.
+ """
+ print(f"Trying to parse jsfile {jsfile}")
+ start_time = time.time()
+ proc = subprocess.run(["./src/scripts/npm_run.sh", "parse-jsfiles", "--",
+ str(jsfile)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ cwd="jstestfuzz")
+ end_time = time.time()
+ status = "pass" if proc.returncode == 0 else "fail"
+ npm_run_output = proc.stdout.decode("UTF-8")
+ if proc.returncode == 0:
+        print(f"Successfully parsed jsfile {jsfile}")
+    else:
+        print(f"Failed to parse jsfile {jsfile}")
+ print(npm_run_output)
+ return simple_report.Result(status=status, exit_code=proc.returncode, start=start_time,
+ end=end_time, test_file=jsfile.name, log_raw=npm_run_output)
+
+
+report = simple_report.Report(failures=0, results=[])
+
+with futures.ThreadPoolExecutor() as executor:
+ parse_jsfiles_futures = [
+ executor.submit(_parse_jsfile, Path(jsfile))
+ for jsfile in glob.iglob(str(OUTPUT_FULL_DIR / "**"), recursive=True)
+ if os.path.isfile(jsfile)
+ ]
+
+ for future in futures.as_completed(parse_jsfiles_futures):
+ result = future.result()
+ report["results"].append(result)
+ report["failures"] += 1 if result["exit_code"] != 0 else 0
+
+simple_report.put_report(report)
+if report["failures"] > 0:
+ print("Had at least one failure, exiting with 1")
+ sys.exit(1)
+
+print("No failures, exiting success")
+sys.exit(0)
diff --git a/evergreen/lint_fuzzer_sanity_patch.sh b/evergreen/lint_fuzzer_sanity_patch.sh
deleted file mode 100644
index 85a387c66317a..0000000000000
--- a/evergreen/lint_fuzzer_sanity_patch.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-. "$DIR/prelude.sh"
-
-cd src
-
-set -o pipefail
-set -o verbose
-
-activate_venv
-
-mkdir -p jstestfuzzinput jstestfuzzoutput
-
-# We need to be the jstestfuzz repo for node to install/run
-cd jstestfuzz
-
-indir="$(pwd)/../jstestfuzzinput"
-outdir="$(pwd)/../jstestfuzzoutput"
-
-# Grep all the js files from modified_and_created_patch_files.txt and put them into $indir.
-(grep -v "\.tpl\.js$" ../modified_and_created_patch_files.txt | grep ".*jstests/.*\.js$" | xargs -I {} cp {} $indir || true)
-
-# Count the number of files in $indir.
-if [[ "$(ls -A $indir)" ]]; then
- num_files=$(ls -A $indir | wc -l)
-
- # Only fetch 50 files to generate jsfuzz testing files.
- if [[ $num_files -gt 50 ]]; then
- num_files=50
- fi
-
- ./src/scripts/npm_run.sh jstestfuzz -- --jsTestsDir $indir --out $outdir --numSourceFiles $num_files --numGeneratedFiles 50
-
- # Run parse-jsfiles on 50 files at a time with 32 processes in parallel.
- ls -1 -d $outdir/* | xargs -P 32 -L 50 ./src/scripts/npm_run.sh parse-jsfiles -- 2>&1 | tee lint_fuzzer_sanity.log
- exit_code=$?
-
- # Exit out of the jstestfuzz directory
- cd ..
- $python ./buildscripts/simple_report.py --test-name lint_fuzzer_sanity_patch --log-file jstestfuzz/lint_fuzzer_sanity.log --exit-code $exit_code
-fi
diff --git a/evergreen/multiversion_setup.sh b/evergreen/multiversion_setup.sh
index 4690f713457f0..aab4e86c7bc6e 100644
--- a/evergreen/multiversion_setup.sh
+++ b/evergreen/multiversion_setup.sh
@@ -7,8 +7,7 @@ set -o errexit
set -o verbose
activate_venv
-
-setup_db_contrib_tool_venv
+setup_db_contrib_tool
export PIPX_HOME="${workdir}/pipx"
export PIPX_BIN_DIR="${workdir}/pipx/bin"
@@ -20,49 +19,6 @@ edition="${multiversion_edition}"
platform="${multiversion_platform}"
architecture="${multiversion_architecture}"
-# The platform and architecture for how some of the binaries are reported in
-# https://downloads.mongodb.org/full.json changed between MongoDB 4.0 and MongoDB 4.2.
-# Certain build variants define additional multiversion_*_42_or_later expansions in order to
-# be able to fetch a complete set of versions.
-
-if [ ! -z "${multiversion_edition_42_or_later}" ]; then
- edition="${multiversion_edition_42_or_later}"
-fi
-
-if [ ! -z "${multiversion_platform_42_or_later}" ]; then
- platform="${multiversion_platform_42_or_later}"
-fi
-
-if [ ! -z "${multiversion_architecture_42_or_later}" ]; then
- architecture="${multiversion_architecture_42_or_later}"
-fi
-
-db-contrib-tool setup-repro-env \
- --installDir /data/install \
- --linkDir /data/multiversion \
- --edition $edition \
- --platform $platform \
- --architecture $architecture \
- --debug \
- 4.2
-
-# The platform and architecture for how some of the binaries are reported in
-# https://downloads.mongodb.org/full.json changed between MongoDB 4.2 and MongoDB 4.4.
-# Certain build variants define additional multiversion_*_44_or_later expansions in order to
-# be able to fetch a complete set of versions.
-
-if [ ! -z "${multiversion_edition_44_or_later}" ]; then
- edition="${multiversion_edition_44_or_later}"
-fi
-
-if [ ! -z "${multiversion_platform_44_or_later}" ]; then
- platform="${multiversion_platform_44_or_later}"
-fi
-
-if [ ! -z "${multiversion_architecture_44_or_later}" ]; then
- architecture="${multiversion_architecture_44_or_later}"
-fi
-
last_lts_arg="--installLastLTS"
last_continuous_arg="--installLastContinuous"
@@ -74,14 +30,23 @@ if [[ -n "${last_continuous_evg_version_id}" ]]; then
last_continuous_arg="${last_continuous_evg_version_id}"
fi
-db-contrib-tool setup-repro-env \
- --installDir /data/install \
+base_command="db-contrib-tool setup-repro-env"
+evergreen_args="--installDir /data/install \
--linkDir /data/multiversion \
- --edition $edition \
--platform $platform \
- --architecture $architecture \
+ --architecture $architecture"
+local_args="--edition $edition \
--fallbackToMaster \
- --resmokeCmd "python buildscripts/resmoke.py" \
+ --resmokeCmd \"python buildscripts/resmoke.py\" \
--debug \
- $last_lts_arg \
- $last_continuous_arg 4.4 5.0
+ ${last_lts_arg} \
+ ${last_continuous_arg} 4.4 5.0 6.0"
+
+remote_invocation="${base_command} ${evergreen_args} ${local_args}"
+eval "${remote_invocation}"
+echo "Verbatim db-contrib-tool invocation: ${remote_invocation}"
+
+local_invocation="${base_command} ${local_args}"
+echo "Local db-contrib-tool invocation: ${local_invocation}"
+
+echo "${local_invocation}" > local-db-contrib-tool-invocation.txt
diff --git a/evergreen/notary_client_crypt_run.sh b/evergreen/notary_client_crypt_run.sh
deleted file mode 100644
index 4d73c3fed988f..0000000000000
--- a/evergreen/notary_client_crypt_run.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-. "$DIR/prelude.sh"
-
-cd src
-
-. ./notary_env.sh
-
-set -o errexit
-set -o verbose
-
-ext="${ext:-tgz}"
-
-mv "mongo_crypt_shared_v1.$ext" mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext}
-
-/usr/local/bin/notary-client.py \
- --key-name "server-7.0" \
- --auth-token-file ${workdir}/src/signing_auth_token \
- --comment "Evergreen Automatic Signing ${revision} - ${build_variant} - ${branch_name}" \
- --notary-url http://notary-service.build.10gen.cc:5000 \
- mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext}
diff --git a/evergreen/notary_client_run.sh b/evergreen/notary_client_run.sh
deleted file mode 100644
index 41173e36fb1b6..0000000000000
--- a/evergreen/notary_client_run.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-. "$DIR/prelude.sh"
-
-cd src
-
-. ./notary_env.sh
-
-set -o errexit
-set -o verbose
-
-long_ext=${ext}
-if [ "$long_ext" == "tgz" ]; then
- long_ext="tar.gz"
-fi
-
-mv mongo-binaries.tgz mongodb-${push_name}-${push_arch}-${suffix}.${ext}
-mv mongo-cryptd.tgz mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext} || true
-mv mh.tgz mh-${push_name}-${push_arch}-${suffix}.${ext} || true
-mv mongo-debugsymbols.tgz mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext} || true
-mv distsrc.${ext} mongodb-src-${src_suffix}.${long_ext} || true
-/usr/bin/find build/ -type f | grep msi$ | xargs -I original_filename cp original_filename mongodb-${push_name}-${push_arch}-${suffix}.msi || true
-
-/usr/local/bin/notary-client.py --key-name "server-7.0" --auth-token-file ${workdir}/src/signing_auth_token --comment "Evergreen Automatic Signing ${revision} - ${build_variant} - ${branch_name}" --notary-url http://notary-service.build.10gen.cc:5000 --skip-missing mongodb-${push_name}-${push_arch}-${suffix}.${ext} mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext} mongodb-${push_name}-${push_arch}-${suffix}.msi mongodb-src-${src_suffix}.${long_ext} mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext}
diff --git a/evergreen/packages_publish.sh b/evergreen/packages_publish.sh
index 960bc3e0a9246..60686adcb0710 100644
--- a/evergreen/packages_publish.sh
+++ b/evergreen/packages_publish.sh
@@ -1,12 +1,24 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
+set -o verbose
+
+packagesfile=packages.tgz
+
+curl https://s3.amazonaws.com/mciuploads/${project}/${build_variant}/${revision}/artifacts/${build_id}-packages.tgz >> $packagesfile
+
+podman run \
+ -v $(pwd):$(pwd) \
+ -w $(pwd) \
+ --env-host \
+ ${UPLOAD_LOCK_IMAGE} \
+ -key=${version_id}/${build_id}/packages/${packagesfile} -tag=task-id=${EVERGREEN_TASK_ID} ${packagesfile}
+
cd src
. ./notary_env.sh
set -o errexit
-set -o verbose
CURATOR_RELEASE=${curator_release}
curl -L -O http://boxes.10gen.com/build/curator/curator-dist-rhel70-$CURATOR_RELEASE.tar.gz
diff --git a/evergreen/prelude_db_contrib_tool.sh b/evergreen/prelude_db_contrib_tool.sh
index 8e9ad1a3995dd..40e0e3458b2fc 100644
--- a/evergreen/prelude_db_contrib_tool.sh
+++ b/evergreen/prelude_db_contrib_tool.sh
@@ -1,11 +1,13 @@
-function setup_db_contrib_tool_venv {
+function setup_db_contrib_tool {
- mkdir ${workdir}/pipx
+ mkdir -p ${workdir}/pipx
export PIPX_HOME="${workdir}/pipx"
export PIPX_BIN_DIR="${workdir}/pipx/bin"
export PATH="$PATH:$PIPX_BIN_DIR"
python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" || exit 1
- python -m pip --disable-pip-version-check install "pipx" || exit 1
- pipx install "db-contrib-tool==0.6.0" || exit 1
+  # We force a reinstall here because, when we download the previous venv, the shebang
+  # in pipx still points to the old machine's Python location.
+ python -m pip --disable-pip-version-check install --force-reinstall --no-deps "pipx==1.2.0" || exit 1
+ pipx install "db-contrib-tool==0.6.5" || exit 1
}
diff --git a/evergreen/prelude_mongo_task_generator.sh b/evergreen/prelude_mongo_task_generator.sh
index 94c363f4e4c4a..bda9892144637 100644
--- a/evergreen/prelude_mongo_task_generator.sh
+++ b/evergreen/prelude_mongo_task_generator.sh
@@ -1,6 +1,6 @@
function setup_mongo_task_generator {
if [ ! -f mongo-task-generator ]; then
- curl -L https://github.com/mongodb/mongo-task-generator/releases/download/v0.7.3/mongo-task-generator --output mongo-task-generator
+ curl -L https://github.com/mongodb/mongo-task-generator/releases/download/v0.7.8/mongo-task-generator --output mongo-task-generator
chmod +x mongo-task-generator
fi
}
diff --git a/evergreen/prelude_venv.sh b/evergreen/prelude_venv.sh
index 6b97a8a9cfb75..a4e9318375e83 100644
--- a/evergreen/prelude_venv.sh
+++ b/evergreen/prelude_venv.sh
@@ -22,8 +22,24 @@ function activate_venv {
if [ "Windows_NT" = "$OS" ]; then
export PYTHONPATH="$PYTHONPATH;$(cygpath -w ${workdir}/src)"
+ elif [ "$(uname)" = "Darwin" ]; then
+    # SERVER-75626: After activating the virtual environment on a macOS host, the PYTHONPATH setting
+    # is incorrect and the virtual environment's site-packages directory cannot be found in sys.path.
+ python_version=$($python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
+ export PYTHONPATH="${workdir}/venv/lib/python${python_version}/site-packages:${PYTHONPATH}:${workdir}/src"
else
- export PYTHONPATH="$PYTHONPATH:${workdir}/src"
+ python_version=$($python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
+ site_packages="${workdir}/venv/lib/python${python_version}/site-packages"
+ python -c "import sys; print(sys.path)"
+
+ # Check if site_packages is already in sys.path
+ in_sys_path=$($python -c "import sys; print('$site_packages' in sys.path)")
+ if [ "$in_sys_path" = "False" ]; then
+ export PYTHONPATH="${site_packages}:${PYTHONPATH}:${workdir}/src"
+ else
+ export PYTHONPATH="$PYTHONPATH:${workdir}/src"
+ fi
+ python -c "import sys; print(sys.path)"
fi
echo "python set to $(which $python)"
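A small Python-side illustration of the check the shell performs above: the venv's site-packages directory is only prepended when it is genuinely missing from sys.path. The directory below is a hypothetical stand-in for the ${workdir}/venv layout:

import sys

site_packages = "/data/mci/venv/lib/python3.10/site-packages"  # hypothetical venv path
if site_packages not in sys.path:
    # Mirrors the shell branch: prepend so packages installed in the venv win.
    sys.path.insert(0, site_packages)
print(sys.path[0])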
diff --git a/evergreen/resmoke_tests_execute.sh b/evergreen/resmoke_tests_execute.sh
index 37dc3773e7175..6a812a9e32e61 100644
--- a/evergreen/resmoke_tests_execute.sh
+++ b/evergreen/resmoke_tests_execute.sh
@@ -14,6 +14,11 @@ if [[ ${disable_unit_tests} = "false" && ! -f ${skip_tests} ]]; then
# activate the virtualenv if it has been set up
activate_venv
+ # Install db-contrib-tool to symbolize crashes during resmoke suite runs
+  # This is not supported on Windows and macOS, so we only do it on Linux.
+ if [ "$(uname)" == "Linux" ]; then
+ setup_db_contrib_tool
+ fi
if [[ -f "patch_test_tags.tgz" ]]; then
tags_build_variant="${build_variant}"
diff --git a/evergreen/run_python_script.sh b/evergreen/run_python_script.sh
index 35181dec3c1be..746c229e9d8ad 100644
--- a/evergreen/run_python_script.sh
+++ b/evergreen/run_python_script.sh
@@ -8,4 +8,5 @@ set -o verbose
cd src
activate_venv
-$python $@
+echo $python $@
+$python "$@"
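The quoting change above matters once an argument contains spaces: with "$@" each original argument reaches $python intact, while the old unquoted $@ let the shell re-split it. A small, self-contained illustration, assuming python3 is on PATH and using a made-up argument:

import subprocess

show = 'import sys; print(sys.argv[1:])'
# Analogous to $python "$@": each argument stays a single argv entry.
kept = subprocess.run(["python3", "-c", show, "two words"], capture_output=True, text=True)
# Analogous to the old unquoted $python $@: the shell re-splits on whitespace.
split = subprocess.run(f"python3 -c '{show}' two words", shell=True, capture_output=True, text=True)
print(kept.stdout.strip())   # ['two words']
print(split.stdout.strip())  # ['two', 'words']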
diff --git a/evergreen/run_upload_lock_push.sh b/evergreen/run_upload_lock_push.sh
new file mode 100755
index 0000000000000..7b69d921e0980
--- /dev/null
+++ b/evergreen/run_upload_lock_push.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# in the future we will want to errexit, but only once we remove
+# continue_on_err from the command
+
+# executables and source archive are always expected on every build
+# source archives should be fine to be uploaded by whichever variant gets
+# there first
+declare -A ARTIFACTS=(
+ [${SERVER_TARBALL_PATH}]=${SERVER_TARBALL_KEY}
+ [${SOURCE_TARBALL_PATH}]=${SOURCE_TARBALL_KEY}
+ [${SERVER_TARBALL_SIGNATURE_PATH}]=${SERVER_TARBALL_SIGNATURE_KEY}
+ [${SOURCE_TARBALL_SIGNATURE_PATH}]=${SOURCE_TARBALL_SIGNATURE_KEY}
+ [${SERVER_TARBALL_SHA1_PATH}]=${SERVER_TARBALL_SHA1_KEY}
+ [${SOURCE_TARBALL_SHA1_PATH}]=${SOURCE_TARBALL_SHA1_KEY}
+ [${SERVER_TARBALL_SHA256_PATH}]=${SERVER_TARBALL_SHA256_KEY}
+ [${SOURCE_TARBALL_SHA256_PATH}]=${SOURCE_TARBALL_SHA256_KEY}
+ [${SERVER_TARBALL_MD5_PATH}]=${SERVER_TARBALL_MD5_KEY}
+ [${SOURCE_TARBALL_MD5_PATH}]=${SOURCE_TARBALL_MD5_KEY}
+)
+
+# mongocryptd is only built for enterprise variants
+if [ -f "${CRYPTD_TARBALL_PATH}" ]; then
+ ARTIFACTS[${CRYPTD_TARBALL_PATH}]=${CRYPTD_TARBALL_KEY}
+ ARTIFACTS[${CRYPTD_TARBALL_SIGNATURE_PATH}]=${CRYPTD_TARBALL_SIGNATURE_KEY}
+ ARTIFACTS[${CRYPTD_TARBALL_SHA1_PATH}]=${CRYPTD_TARBALL_SHA1_KEY}
+ ARTIFACTS[${CRYPTD_TARBALL_SHA256_PATH}]=${CRYPTD_TARBALL_SHA256_KEY}
+ ARTIFACTS[${CRYPTD_TARBALL_MD5_PATH}]=${CRYPTD_TARBALL_MD5_KEY}
+fi
+
+# mongohouse only built sometimes
+# we do not sign mongohouse, so no detached signature and no checksums
+if [ -f "${MONGOHOUSE_TARBALL_PATH}" ]; then
+ ARTIFACTS[${MONGOHOUSE_TARBALL_PATH}]=${MONGOHOUSE_TARBALL_KEY}
+fi
+
+# debug symbols are only built sometimes
+# not clear which variants that is the case for
+if [ -f "${DEBUG_SYMBOLS_TARBALL_PATH}" ]; then
+ ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_PATH}]=${DEBUG_SYMBOLS_TARBALL_KEY}
+ ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SIGNATURE_PATH}]=${DEBUG_SYMBOLS_TARBALL_SIGNATURE_KEY}
+ ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SHA1_PATH}]=${DEBUG_SYMBOLS_TARBALL_SHA1_KEY}
+ ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SHA256_PATH}]=${DEBUG_SYMBOLS_TARBALL_SHA256_KEY}
+ ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_MD5_PATH}]=${DEBUG_SYMBOLS_TARBALL_MD5_KEY}
+fi
+
+# MSIs are only built on windows
+# note there is no detached signature file
+if [ -f "${MSI_PATH}" ]; then
+ ARTIFACTS[${MSI_PATH}]=${MSI_KEY}
+ ARTIFACTS[${MSI_SHA1_PATH}]=${MSI_SHA1_KEY}
+ ARTIFACTS[${MSI_SHA256_PATH}]=${MSI_SHA256_KEY}
+ ARTIFACTS[${MSI_MD5_PATH}]=${MSI_MD5_KEY}
+fi
+
+set -o verbose
+
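+# Upload each artifact: mount the working directory into the container, pass the host
+# environment through (--env-host) so credentials are available, and hand the upload tool
+# the destination key, a task-id tag, and the local artifact path.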
+for path in "${!ARTIFACTS[@]}"; do
+
+ key=${ARTIFACTS[${path}]}
+ podman run \
+ -v $(pwd):$(pwd) \
+ -w $(pwd) \
+ --env-host \
+ ${UPLOAD_LOCK_IMAGE} \
+ -key=${key} -tag=task-id=${EVERGREEN_TASK_ID} ${path}
+
+done
diff --git a/evergreen/scons_compile.sh b/evergreen/scons_compile.sh
index 2da927b9039cc..0fe4cc99c08be 100755
--- a/evergreen/scons_compile.sh
+++ b/evergreen/scons_compile.sh
@@ -60,6 +60,64 @@ if [ "${generating_for_ninja}" = "true" ] && [ "Windows_NT" = "$OS" ]; then
fi
activate_venv
+# If build_patch_id is passed, try to download binaries from the specified
+# evergreen patch.
+# This is purposefully before the venv setup so we do not touch the venv deps.
+if [ -n "${build_patch_id}" ]; then
+ echo "build_patch_id detected, trying to skip task"
+ if [ "${task_name}" = "compile_dist_test" ] || [ "${task_name}" = "compile_dist_test_half" ]; then
+ echo "Skipping ${task_name} compile without downloading any files"
+ exit 0
+ fi
+
+ # On Windows we change the extension to zip
+ if [ -z "${ext}" ]; then
+ ext="tgz"
+ fi
+
+ extra_db_contrib_args=""
+
+ # get the platform of the dist archive. This is needed if
+ # db-contrib-tool cannot autodetect the platform of the ec2 instance.
+ regex='MONGO_DISTMOD=([a-z0-9]*)'
+ if [[ ${compile_flags} =~ ${regex} ]]; then
+ extra_db_contrib_args="${extra_db_contrib_args} --platform=${BASH_REMATCH[1]}"
+ fi
+
+ if [ "${task_name}" = "archive_dist_test" ]; then
+ file_name="mongodb-binaries.${ext}"
+ invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
+ --variant=${compile_variant} --extractDownloads=False \
+ --binariesName=${file_name} --installDir=./ ${extra_db_contrib_args}"
+ fi
+
+ if [ "${task_name}" = "archive_dist_test_debug" ]; then
+ file_name="mongo-debugsymbols.${ext}"
+ invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
+ --variant=${compile_variant} --extractDownloads=False \
+ --debugsymbolsName=${file_name} --installDir=./ \
+ --skipBinaries --downloadSymbols ${extra_db_contrib_args}"
+ fi
+
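+ # If one of the archive tasks above matched, download the prebuilt artifact with
+ # db-contrib-tool instead of compiling, then exit so the rest of the compile is skipped.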
+ if [ -n "${invocation}" ]; then
+ setup_db_contrib_tool
+
+ echo "db-contrib-tool invocation: ${invocation}"
+ eval ${invocation}
+ if [ $? -ne 0 ]; then
+ echo "Could not retrieve files with db-contrib-tool"
+ exit 1
+ fi
+ echo "Downloaded: ${file_name}"
+ mv "${build_patch_id}/${file_name}" "${file_name}"
+ echo "Moved ${file_name} to the correct location"
+ echo "Skipping ${task_name} compile"
+ exit 0
+ fi
+
+ echo "Could not skip ${task_name} compile, compiling as normal"
+fi
+
set -o pipefail
eval ${compile_env} $python ./buildscripts/scons.py \
${compile_flags} ${task_compile_flags} ${task_compile_flags_extra} \
diff --git a/evergreen/selinux_run_test.sh b/evergreen/selinux_run_test.sh
index 318d73adbe2f3..c7ec9502a4645 100755
--- a/evergreen/selinux_run_test.sh
+++ b/evergreen/selinux_run_test.sh
@@ -3,76 +3,139 @@
# Notes on how to run this manually:
# - repo must be unpacked into source tree
#
-# export ssh_key=$HOME/.ssh/id_rsa
-# export hostname=ec2-3-91-230-150.compute-1.amazonaws.com
-# export user=ec2-user
-# export bypass_prelude=yes
+# export SSH_KEY=$HOME/.ssh/id_rsa
+# export SELINUX_HOSTNAME=ec2-3-91-230-150.compute-1.amazonaws.com
+# export SELINUX_USER=ec2-user
+# export BYPASS_PRELUDE=yes
+# export SRC="$(basename $(pwd) | tee /dev/stderr)"
+# export TEST_LIST='jstests/selinux/*.js'
# export workdir="$(dirname $(pwd) | tee /dev/stderr)"
-# export src="$(basename $(pwd) | tee /dev/stderr)"
-# export test_list='jstests/selinux/*.js'
-# export pkg_variant=mongodb-enterprise
# evergreen/selinux_run_test.sh
set -o errexit
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-if [ "$bypass_prelude" != "yes" ]; then
- . "$DIR/prelude.sh"
+readonly k_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+
+if [ "$BYPASS_PRELUDE" != "yes" ]; then
+ . "$k_dir/prelude.sh"
activate_venv
- src="src"
+ readonly k_src="src"
+else
+ readonly k_src="$SRC"
fi
-set -o xtrace
-
-if [ "$hostname" == "" ]; then
- hostname="$(tr -d '"[]{}' < "$workdir"/$src/hosts.yml | cut -d , -f 1 | awk -F : '{print $2}')"
+# If no selinux hostname is defined in the external environment, then we are running through evergreen, which has dumped
+# spawn host properties about this host into hosts.yml via host.list
+# (https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Commands.md#hostlist),
+# from which we can derive the hostname of the remote host.
+# Also note that $workdir here is a built-in expansion from evergreen: see more info at
+# https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Configuration-Files.md#default-expansions
+if [ "$SELINUX_HOSTNAME" == "" ]; then
+ readonly k_selinux_hostname="$(tr -d '"[]{}' < "$workdir"/$k_src/hosts.yml | cut -d , -f 1 | awk -F : '{print $2}')"
+ cat "$workdir"/$k_src/hosts.yml
+else
+ readonly k_selinux_hostname="$SELINUX_HOSTNAME"
fi
-if [ "$user" == "" ]; then
- user=$USER
-fi
+# SELINUX_USER injected from evergreen config, do not change
+readonly k_host="${SELINUX_USER}@${k_selinux_hostname}"
+
+# Obtain the ssh key and properties from expansions.yml, output from evergreen via the expansions.write command
+# (https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Commands.md#expansionswrite)
+if [ "$SSH_KEY" == "" ]; then
+ readonly k_ssh_key="$workdir/selinux.pem"
-host="${user}@${hostname}"
-python="${python:-python3}"
+ "$workdir"/$k_src/buildscripts/yaml_key_value.py --yamlFile="$workdir"/expansions.yml \
+ --yamlKey=__project_aws_ssh_key_value > "$k_ssh_key"
+
+ chmod 600 "$k_ssh_key"
+
+ result="$(openssl rsa -in "$k_ssh_key" -check -noout | tee /dev/stderr)"
-if [ "$ssh_key" == "" ]; then
- ssh_key="$workdir/selinux.pem"
- "$workdir"/$src/buildscripts/yaml_key_value.py --yamlFile="$workdir"/expansions.yml \
- --yamlKey=__project_aws_ssh_key_value > "$ssh_key"
- chmod 600 "$ssh_key"
- result="$(openssl rsa -in "$ssh_key" -check -noout | tee /dev/stderr)"
if [ "$result" != "RSA key ok" ]; then
exit 1
fi
+else
+ readonly k_ssh_key="$SSH_KEY"
fi
-attempts=0
-connection_attempts=50
-
-# Check for remote connectivity
-set +o errexit
-ssh_options="-i $ssh_key -o IdentitiesOnly=yes -o StrictHostKeyChecking=no"
-while ! ssh $ssh_options -o ConnectTimeout=10 "$host" echo "I am working"; do
- if [ "$attempts" -ge "$connection_attempts" ]; then exit 1; fi
- ((attempts++))
- printf "SSH connection attempt %d/%d failed. Retrying...\n" "$attempts" "$connection_attempts"
- sleep 10
-done
+readonly k_ssh_options="-i $k_ssh_key -o IdentitiesOnly=yes -o StrictHostKeyChecking=no"
+
+function copy_sources_to_target() {
+
+ rsync -ar -e "ssh $k_ssh_options" \
+ --exclude 'tmp' --exclude 'build' --exclude '.*' \
+ "$workdir"/$k_src/* "$k_host":
+
+ return $?
+}
+
+function configure_target_machine() {
+ ssh $k_ssh_options "$k_host" evergreen/selinux_test_setup.sh
+ return $?
+}
+
+function execute_tests_on_target() {
+ ssh $k_ssh_options "$k_host" evergreen/selinux_test_executor.sh "$1"
+ return $?
+}
+
+function check_remote_connectivity() {
+ ssh -q $k_ssh_options -o ConnectTimeout=10 "$k_host" echo "I am working"
+ return $?
+}
+
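+# retry_command <max_attempts> <command> [args...]
+# Re-runs <command> (with any remaining args) until it succeeds, waiting 10 seconds between
+# attempts; once <max_attempts> is exceeded the script exits with status 1.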
+function retry_command() {
+
+ local connection_attempts=$1
+ local cmd="$2"
+ shift 2 # consume the first 2 parameters so any remaining are passed on to the retried command
+
+ local attempts=0
+ set +o errexit
+
+ while true; do
+ "$cmd" "$@"
+
+ local result=$?
+
+ if [[ $result -eq 0 ]]; then
+ set -o errexit
+ return $result
+ fi
+
+ if [[ $attempts -ge $connection_attempts ]]; then
+ printf "%s failed after %d attempts with final error code %s.\n" "$cmd" "$attempts" "$result"
+ exit 1
+ fi
+
+ sleep 10
+ ((attempts++))
+
+ done
+}
+
+echo "===> Checking for remote connectivity..."
+retry_command 20 check_remote_connectivity
-set -o errexit
echo "===> Copying sources to target..."
-rsync -ar -e "ssh $ssh_options" \
- --exclude 'tmp' --exclude 'build' --exclude '.*' \
- "$workdir"/$src/* "$host":
+retry_command 5 copy_sources_to_target
echo "===> Configuring target machine..."
-ssh $ssh_options "$host" evergreen/selinux_test_setup.sh
+retry_command 5 configure_target_machine
echo "===> Executing tests..."
-list="$(
+readonly list="$(
cd src
- for x in $test_list; do echo "$x"; done
+
+ # $TEST_LIST is defined in the evergreen "run selinux tests" function, do not change
+ for x in $TEST_LIST; do echo "$x"; done
)"
+
for test in $list; do
- ssh $ssh_options "$host" evergreen/selinux_test_executor.sh "$test"
+ execute_tests_on_target "$test"
+ res="$?"
+ if [[ $res -ne 0 ]]; then
+ exit "$res"
+ fi
done
diff --git a/evergreen/selinux_test_executor.sh b/evergreen/selinux_test_executor.sh
index e4f5c8bc1bb62..481cc2c519931 100755
--- a/evergreen/selinux_test_executor.sh
+++ b/evergreen/selinux_test_executor.sh
@@ -1,108 +1,175 @@
#!/bin/bash
+set +o errexit
-set -o errexit
-set -o xtrace
+readonly k_log_path="/var/log/mongodb/mongod.log"
+readonly k_mongo="$(pwd)/dist-test/bin/mongo"
+readonly k_test_path="$1"
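+# Reported through the EXIT trap installed below; stays 1 unless the test run completes.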
+return_code=1
-mongo="$(pwd)/dist-test/bin/mongo"
-export PATH="$(dirname "$mongo"):$PATH"
-if [ ! -f "$mongo" ]; then
- echo "Mongo shell at $mongo is missing"
- exit 1
-fi
+export PATH="$(dirname "$k_mongo"):$PATH"
-function print() {
+function print_err() {
echo "$@" >&2
}
function monitor_log() {
- sed "s!^!mongod| $(date '+%F %H-%M-%S') !" <(sudo --non-interactive tail -f /var/log/mongodb/mongod.log)
+ sed "s!^!mongod| $(date '+%F %H-%M-%S') !" <(sudo --non-interactive tail -f $k_log_path)
}
-TEST_PATH="$1"
-if [ ! -f "$TEST_PATH" ]; then
- print "No test supplied or test file not found. Run:"
- print " $(basename "${BASH_SOURCE[0]}") "
- exit 1
-fi
+function output_ausearch() {
+ local cmd_parameters="AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR"
+
+ echo ""
+ echo "====== SELinux errors (ausearch -m $cmd_parameters): ======"
+ sudo --non-interactive ausearch -m $cmd_parameters -ts $1
+}
-# test file is even good before going on
-if ! "$mongo" --nodb --norc --quiet "$TEST_PATH"; then
- print "File $TEST_PATH has syntax errors"
+function output_journalctl() {
+ echo ""
+ echo "============================== journalctl ========================================="
+ sudo --non-interactive journalctl --no-pager --catalog --since="$1" | grep -i mongo
+}
+
+function fail_and_exit_err() {
+
+ echo ""
+ echo "==================================================================================="
+ echo "++++++++ Test failed, outputting last 5 seconds of additional log info ++++++++++++"
+ echo "==================================================================================="
+ output_ausearch "$(date --utc --date='5 seconds ago' '+%x %H:%M:%S')"
+ output_journalctl "$(date --utc --date='5 seconds ago' +'%Y-%m-%d %H:%M:%S')"
+
+ echo ""
+ echo "==== FAIL: $1 ===="
exit 1
-fi
+}
-# stop mongod, zero mongo log, clean up database, set all booleans to off
-sudo --non-interactive bash -c '
- systemctl stop mongod
+function create_mongo_config() {
+ echo "Writing /etc/mongod.conf for $k_test_path:"
+ "$k_mongo" --nodb --norc --quiet --eval='
+ assert(load("'"$k_test_path"'"));
+ const test = new TestDefinition();
+ print(JSON.stringify(test.config, null, 2));
- rm -f /var/log/mongodb/mongod.log
- touch /var/log/mongodb/mongod.log
- chown mongod /var/log/mongodb/mongod.log
+ ' | sudo --non-interactive tee /etc/mongod.conf
+}
- rm -rf /var/lib/mongo/*
+function start_mongod() {
+ # Start mongod and if it won't come up, fail and exit
- rm -rf /etc/sysconfig/mongod /etc/mongod
+ sudo --non-interactive systemctl start mongod \
+ && sudo --non-interactive systemctl status mongod || (
+ fail_and_exit_err "systemd failed to start mongod server!"
+ )
+}
- setsebool mongod_can_connect_ldap off
- setsebool mongod_can_use_kerberos off
-'
+function wait_for_mongod_to_accept_connections() {
+ # Once the mongod process starts via systemd, it can still take a couple of seconds
+ # to set up and accept connections. We wait for log id 23016 to show up, which indicates
+ # that the server is ready to accept incoming connections, before starting the tests.
-# create mongo config
-"$mongo" --nodb --norc --quiet --eval='
- assert(load("'"$TEST_PATH"'"));
- const test = new TestDefinition();
- print(typeof(test.config) === "string" ? test.config : JSON.stringify(test.config, null, 2));
-' | sudo --non-interactive tee /etc/mongod.conf
+ local server_ready=0
+ local wait_seconds=2
+ local wait_retries_max=30
+ local wait_retries=0
-# setup
-"$mongo" --nodb --norc --quiet --eval='
- assert(load("'"$TEST_PATH"'"));
- const test = new TestDefinition();
- jsTest.log("Running setup()");
- test.setup();
-'
+ while [[ $wait_retries -le $wait_retries_max ]]; do
+ local server_status="$(grep 23016 $k_log_path || echo "")"
-# start log monitor, also kill it on exit
-monitor_log &
-MONITORPID="$!"
-trap "sudo --non-interactive pkill -P $MONITORPID" SIGINT SIGTERM ERR EXIT
-
-# start mongod and if it won't come up, log SELinux errors
-ts="$(date --utc --date='1 seconds ago' '+%x %H:%M:%S')"
-tsj="$(date --utc --date='1 seconds ago' +'%Y-%m-%d %H:%M:%S')"
-sudo --non-interactive systemctl start mongod \
- && sudo --non-interactive systemctl status mongod || (
- set +o errexit
- echo "================== SELinux errors: =================="
- sudo --non-interactive ausearch -m AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR -ts $ts
- echo "================== journalctl =================="
- sudo --non-interactive journalctl --no-pager --catalog --since="$tsj" | grep -i mongo
- echo "================== /var/log/mongodb/mongod.log =================="
- sudo --non-interactive cat /var/log/mongodb/mongod.log
- echo "==== FAIL: mongod service was not started successfully"
+ if [ "$server_status" != "" ]; then
+ server_ready=1
+ break
+ fi
+
+ sleep $wait_seconds
+ ((wait_retries++))
+ done
+
+ if [ "$server_ready" -eq 0 ]; then
+ fail_and_exit_err "failed to connect to mongod server after waiting for $(($wait_seconds * $wait_retries)) seconds!"
+ fi
+}
+
+function clear_mongo_config() {
+ # stop mongod, zero mongo log, clean up database, set all booleans to off
+ sudo --non-interactive bash -c '
+ systemctl stop mongod
+
+ rm -f '"$k_log_path"'
+ touch '"$k_log_path"'
+ chown mongod '"$k_log_path"'
+
+ rm -rf /var/lib/mongo/*
+
+ rm -rf /etc/sysconfig/mongod /etc/mongod
+
+ setsebool mongod_can_connect_ldap off
+ setsebool mongod_can_use_kerberos off
+ '
+}
+
+function exit_with_code() {
+ exit $return_code
+}
+
+function setup_test_definition() {
+ "$k_mongo" --nodb --norc --quiet --eval='
+ assert(load("'"$k_test_path"'"));
+ (() => {
+ const test = new TestDefinition();
+ print("Running setup() for '"$k_test_path"'");
+ test.setup();
+ })();
+ '
+}
+
+function run_test() {
+ "$k_mongo" --norc --gssapiServiceName=mockservice --eval='
+ assert(load("'"$k_test_path"'"));
+ print("Running test '"$k_test_path"'");
+
+ const test = new TestDefinition();
+
+ try {
+ await test.run();
+ } finally {
+ test.teardown();
+ }
+ ' || fail_and_exit_err "Test failed"
+
+ echo "SUCCESS: $k_test_path"
+}
+
+if [ ! -f "$k_mongo" ]; then
+ print_err "Mongo shell at $k_mongo is missing"
exit 1
-)
-
-# run test and teardown
-"$mongo" --norc --gssapiServiceName=mockservice --eval='
- assert(load("'"$TEST_PATH"'"));
- // name is such to prevent collisions
- const test_812de7ce = new TestDefinition();
- try {
- jsTest.log("Running test");
- test_812de7ce.run();
- } finally {
- test_812de7ce.teardown();
- }
-' || (
- echo "==== FAIL: test returned result: $?"
- echo "=== SELinux errors:"
- set +o errexit
- sudo --non-interactive ausearch -m AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR -ts $ts
- echo "=== /var/log/mongodb/mongod.log:"
- sudo --non-interactive cat /var/log/mongodb/mongod.log
+fi
+
+if [ ! -f "$k_test_path" ]; then
+ print_err "No test supplied or test file not found. Run:"
+ print_err "$(basename "${BASH_SOURCE[0]}") "
+ exit 1
+fi
+
+# Ensure file containing tests is valid before executing
+if ! "$k_mongo" --nodb --norc --quiet "$k_test_path"; then
+ print_err "File $k_test_path has syntax errors"
exit 1
-)
+fi
+
+echo "STARTING TEST: $k_test_path"
+
+clear_mongo_config
+create_mongo_config
+setup_test_definition
+
+# start log monitor, also kill it on exit
+monitor_log &
+monitor_pid="$!"
+trap "sudo --non-interactive pkill -P $monitor_pid; exit_with_code" SIGINT SIGTERM ERR EXIT
+
+start_mongod
+wait_for_mongod_to_accept_connections
+run_test
-set +o xtrace
-echo "SUCCESS: $TEST_PATH"
+return_code=0
diff --git a/jstests/aggregation/accumulators/first_n_last_n.js b/jstests/aggregation/accumulators/first_n_last_n.js
index 87c35c35c77f5..926535eb1ffee 100644
--- a/jstests/aggregation/accumulators/first_n_last_n.js
+++ b/jstests/aggregation/accumulators/first_n_last_n.js
@@ -4,6 +4,7 @@
(function() {
"use strict";
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
load("jstests/aggregation/extras/utils.js");
const coll = db[jsTestName()];
@@ -20,17 +21,21 @@ const kMaxSales = 20;
let expectedFirstThree = [];
let expectedLastThree = [];
let expectedAllResults = [];
+let expectedFirstNWithInitExpr = [];
+let expectedLastNWithInitExpr = [];
for (const states
of [{state: 'AZ', sales: 3}, {state: 'CA', sales: 2}, {state: 'NY', sales: kMaxSales}]) {
let allResults = [];
let firstThree = [];
let lastThree = [];
+ let firstWithInitExpr = [];
+ let lastWithInitExpr = [];
const state = states['state'];
const sales = states['sales'];
for (let i = 0; i < kMaxSales; ++i) {
const salesAmt = i * 10;
if (i < sales) {
- docs.push({state: state, sales: salesAmt});
+ docs.push({state: state, sales: salesAmt, stateObj: {"st": state}, n: 3});
// First N candidate.
if (i < defaultN) {
@@ -40,12 +45,21 @@ for (const states
if (i + defaultN >= sales) {
lastThree.push(salesAmt);
}
+
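+ // The "WithInitExpr" variants expect n = defaultN for the 'AZ' group and n = 1 for the
+ // other groups, matching the $cond n-expression used in the $firstN/$lastN specs below.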
+ if (i == 0 || (state == 'AZ' && i < defaultN)) {
+ firstWithInitExpr.push(salesAmt);
+ }
+ if (i + 1 == sales || (state == 'AZ' && i + defaultN >= sales)) {
+ lastWithInitExpr.push(salesAmt);
+ }
allResults.push(salesAmt);
}
}
expectedFirstThree.push({_id: state, sales: firstThree});
expectedLastThree.push({_id: state, sales: lastThree});
expectedAllResults.push({_id: state, sales: allResults});
+ expectedFirstNWithInitExpr.push({_id: state, sales: firstWithInitExpr});
+ expectedLastNWithInitExpr.push({_id: state, sales: lastWithInitExpr});
}
assert.commandWorked(coll.insert(docs));
@@ -65,6 +79,54 @@ function runFirstLastN(n, expectedFirstNResults, expectedLastNResults) {
() => "expected " + tojson(expectedFirstNResults) + " actual " +
tojson(actualFirstNResults));
+ const firstNResultsWithInitExpr =
+ coll.aggregate([
+ {$sort: {_id: 1}},
+ {
+ $group: {
+ _id: {"st": "$state"},
+ sales: {
+ $firstN: {
+ input: "$sales",
+ n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}}
+ }
+ }
+ }
+ },
+ ])
+ .toArray();
+
+ let expectedResult = [];
+ expectedFirstNWithInitExpr.forEach(
+ i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']}));
+ assert(arrayEq(expectedResult, firstNResultsWithInitExpr),
+ () => "expected " + tojson(expectedResult) + " actual " +
+ tojson(firstNResultsWithInitExpr));
+
+ const firstNResultsWithInitExprAndVariableGroupId =
+ coll.aggregate([
+ {$sort: {_id: 1}},
+ {
+ $group: {
+ _id: "$stateObj",
+ sales: {
+ $firstN: {
+ input: "$sales",
+ n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}}
+ }
+ }
+ }
+ },
+ ])
+ .toArray();
+
+ expectedResult = [];
+ expectedFirstNWithInitExpr.forEach(
+ i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']}));
+ assert(arrayEq(expectedResult, firstNResultsWithInitExprAndVariableGroupId),
+ () => "expected " + tojson(expectedResult) + " actual " +
+ tojson(firstNResultsWithInitExprAndVariableGroupId));
+
const actualLastNResults =
coll.aggregate([
{$sort: {_id: 1}},
@@ -75,6 +137,54 @@ function runFirstLastN(n, expectedFirstNResults, expectedLastNResults) {
arrayEq(expectedLastNResults, actualLastNResults),
() => "expected " + tojson(expectedLastNResults) + " actual " + tojson(actualLastNResults));
+ const lastNResultsWithInitExpr =
+ coll.aggregate([
+ {$sort: {_id: 1}},
+ {
+ $group: {
+ _id: {"st": "$state"},
+ sales: {
+ $lastN: {
+ input: "$sales",
+ n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}}
+ }
+ }
+ }
+ },
+ ])
+ .toArray();
+
+ expectedResult = [];
+ expectedLastNWithInitExpr.forEach(
+ i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']}));
+ assert(
+ arrayEq(expectedResult, lastNResultsWithInitExpr),
+ () => "expected " + tojson(expectedResult) + " actual " + tojson(lastNResultsWithInitExpr));
+
+ const lastNResultsWithInitExprAndVariableGroupId =
+ coll.aggregate([
+ {$sort: {_id: 1}},
+ {
+ $group: {
+ _id: "$stateObj",
+ sales: {
+ $lastN: {
+ input: "$sales",
+ n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}}
+ }
+ }
+ }
+ },
+ ])
+ .toArray();
+
+ expectedResult = [];
+ expectedLastNWithInitExpr.forEach(
+ i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']}));
+ assert(arrayEq(expectedResult, lastNResultsWithInitExprAndVariableGroupId),
+ () => "expected " + tojson(expectedResult) + " actual " +
+ tojson(lastNResultsWithInitExprAndVariableGroupId));
+
function reorderBucketResults(bucketResults) {
// Using a computed projection will put the fields out of order. As such, we re-order them
// below.
@@ -213,4 +323,4 @@ assert.commandFailedWithCode(
"aggregate",
{pipeline: [{$group: {_id: {'st': '$state'}, sales: {$firstN: {n: 2}}}}], cursor: {}}),
5787907);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/accumulators/median_approx.js b/jstests/aggregation/accumulators/median_approx.js
index 0e0b1d6dbfaaa..166172d0661f9 100644
--- a/jstests/aggregation/accumulators/median_approx.js
+++ b/jstests/aggregation/accumulators/median_approx.js
@@ -3,7 +3,6 @@
* field 'p':[0.5].
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
diff --git a/jstests/aggregation/accumulators/min_n_max_n.js b/jstests/aggregation/accumulators/min_n_max_n.js
index c7660fc5df265..e7a4ea329cf3d 100644
--- a/jstests/aggregation/accumulators/min_n_max_n.js
+++ b/jstests/aggregation/accumulators/min_n_max_n.js
@@ -4,6 +4,8 @@
(function() {
"use strict";
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
+
const coll = db[jsTestName()];
coll.drop();
diff --git a/jstests/aggregation/accumulators/percentiles_approx.js b/jstests/aggregation/accumulators/percentiles_approx.js
index ee6b8db778a4f..29bad4f376a54 100644
--- a/jstests/aggregation/accumulators/percentiles_approx.js
+++ b/jstests/aggregation/accumulators/percentiles_approx.js
@@ -2,7 +2,6 @@
* Tests for the approximate percentile accumulator semantics.
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
@@ -16,10 +15,11 @@ const coll = db[jsTestName()];
* Tests for correctness without grouping. Each group gets its own accumulator so we can validate
* the basic $percentile functionality using a single group.
*/
-function testWithSingleGroup({docs, percentileSpec, expectedResult, msg}) {
+function testWithSingleGroup({docs, percentileSpec, letSpec, expectedResult, msg}) {
coll.drop();
coll.insertMany(docs);
- const res = coll.aggregate([{$group: {_id: null, p: percentileSpec}}]).toArray();
+ const res =
+ coll.aggregate([{$group: {_id: null, p: percentileSpec}}], {let : letSpec}).toArray();
// For $percentile the result should be ordered to match the spec, so assert exact equality.
assert.eq(expectedResult, res[0].p, msg + `; Result: ${tojson(res)}`);
@@ -67,6 +67,41 @@ testWithSingleGroup({
msg: "Multiple percentiles"
});
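+// The following cases supply 'p' via $$-variables defined in the aggregate command's 'let'
+// and via constant-foldable expressions, and supply 'input' as an expression.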
+testWithSingleGroup({
+ docs: [{x: 0}, {x: 1}, {x: 2}],
+ percentileSpec: {$percentile: {p: "$$ps", input: "$x", method: "approximate"}},
+ letSpec: {ps: [0.5, 0.9, 0.1]},
+ expectedResult: [1, 2, 0],
+ msg: "Multiple percentiles using variable in the percentile spec for the whole array"
+});
+
+testWithSingleGroup({
+ docs: [{x: 0}, {x: 1}, {x: 2}],
+ percentileSpec: {$percentile: {p: ["$$p90"], input: "$x", method: "approximate"}},
+ letSpec: {p90: 0.9},
+ expectedResult: [2],
+ msg: "Single percentile using variable in the percentile spec for the array elements"
+});
+
+testWithSingleGroup({
+ docs: [{x: 0}, {x: 1}, {x: 2}],
+ percentileSpec: {
+ $percentile:
+ {p: {$concatArrays: [[0.1, 0.5], ["$$p90"]]}, input: "$x", method: "approximate"}
+ },
+ letSpec: {p90: 0.9},
+ expectedResult: [0, 1, 2],
+ msg: "Multiple percentiles using const expression in the percentile spec"
+});
+
+testWithSingleGroup({
+ docs: [{x: 0}, {x: 1}, {x: 2}],
+ percentileSpec: {$percentile: {p: "$$ps", input: {$add: [42, "$x"]}, method: "approximate"}},
+ letSpec: {ps: [0.5, 0.9, 0.1]},
+ expectedResult: [42 + 1, 42 + 2, 42 + 0],
+ msg: "Multiple percentiles using expression as input"
+});
+
function testWithMultipleGroups({docs, percentileSpec, expectedResult, msg}) {
coll.drop();
coll.insertMany(docs);
diff --git a/jstests/aggregation/accumulators/percentiles_syntax.js b/jstests/aggregation/accumulators/percentiles_syntax.js
index c1aa4a050fed6..b93954cbe67f5 100644
--- a/jstests/aggregation/accumulators/percentiles_syntax.js
+++ b/jstests/aggregation/accumulators/percentiles_syntax.js
@@ -2,7 +2,6 @@
* Tests for the $percentile accumulator syntax.
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
@@ -16,91 +15,218 @@ coll.drop();
// order to check its format.
coll.insert({x: 42});
-/**
- * Tests to check that invalid $percentile specifications are rejected.
- */
-function assertInvalidSyntax(percentileSpec, msg) {
- assert.commandFailed(
- coll.runCommand("aggregate",
- {pipeline: [{$group: {_id: null, p: percentileSpec}}], cursor: {}}),
- msg);
+function assertInvalidSyntax({pSpec, letSpec, msg}) {
+ let command = {pipeline: [{$group: {_id: null, p: pSpec}}], let : letSpec, cursor: {}};
+ assert.commandFailed(coll.runCommand("aggregate", command), msg);
}
-assertInvalidSyntax({$percentile: 0.5}, "Should fail if $percentile is not an object");
-
-assertInvalidSyntax({$percentile: {input: "$x", method: "approximate"}},
- "Should fail if $percentile is missing 'p' field");
-
-assertInvalidSyntax({$percentile: {p: [0.5], method: "approximate"}},
- "Should fail if $percentile is missing 'input' field");
+function assertValidSyntax({pSpec, letSpec, msg}) {
+ let command = {pipeline: [{$group: {_id: null, p: pSpec}}], let : letSpec, cursor: {}};
+ assert.commandWorked(coll.runCommand("aggregate", command), msg);
+}
-assertInvalidSyntax({$percentile: {p: [0.5], input: "$x"}},
- "Should fail if $percentile is missing 'method' field");
+/**
+ * Test missing or unexpected fields in $percentile spec.
+ */
+assertInvalidSyntax(
+ {pSpec: {$percentile: 0.5}, msg: "Should fail if $percentile is not an object"});
-assertInvalidSyntax({$percentile: {p: [0.5], input: "$x", method: "approximate", extras: 42}},
- "Should fail if $percentile contains an unexpected field");
+assertInvalidSyntax({
+ pSpec: {$percentile: {input: "$x", method: "approximate"}},
+ msg: "Should fail if $percentile is missing 'p' field"
+});
-assertInvalidSyntax({$percentile: {p: 0.5, input: "$x", method: "approximate"}},
- "Should fail if 'p' field in $percentile isn't array");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], method: "approximate"}},
+ msg: "Should fail if $percentile is missing 'input' field"
+});
-assertInvalidSyntax({$percentile: {p: [], input: "$x", method: "approximate"}},
- "Should fail if 'p' field in $percentile is an empty array");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: "$x"}},
+ msg: "Should fail if $percentile is missing 'method' field"
+});
-assertInvalidSyntax(
- {$percentile: {p: [0.5, "foo"], input: "$x", method: "approximate"}},
- "Should fail if 'p' field in $percentile is an array with a non-numeric element");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: "$x", method: "approximate", extras: 42}},
+ msg: "Should fail if $percentile contains an unexpected field"
+});
-assertInvalidSyntax(
- {$percentile: {p: [0.5, 10], input: "$x", method: "approximate"}},
- "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range");
+/**
+ * Test invalid 'p' field, specified as a constant.
+ */
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: 0.5, input: "$x", method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile isn't array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [], input: "$x", method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile is an empty array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, "foo"], input: "$x", method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile is an array with a non-numeric element"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 10], input: "$x", method: "approximate"}},
+ msg:
+ "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range"
+});
-assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: "$x", method: 42}},
- "Should fail if 'method' field isn't a string");
+/**
+ * Test invalid 'p' field, specified as an expression.
+ */
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: ["$x"], input: "$x", method: "approximate"}},
+ msg: "'p' should not accept non-const expressions"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: {$add: [0.1, 0.5]}, input: "$x", method: "approximate"}},
+ msg: "'p' should not accept expressions that evaluate to a non-array"
+});
+
+assertInvalidSyntax({
+ pSpec: {
+ $percentile:
+ {p: {$concatArrays: [[0.01, 0.1], ["foo"]]}, input: "$x", method: "approximate"}
+ },
+ msg: "'p' should not accept expressions that evaluate to an array with non-numeric elements"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}},
+ letSpec: {pvals: 0.5},
+ msg: "'p' should not accept variables that evaluate to a non-array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}},
+ letSpec: {pvals: [0.5, "foo"]},
+ msg: "'p' should not accept variables that evaluate to an array with non-numeric elements"
+});
-assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: "$x", method: "fancy"}},
- "Should fail if 'method' isn't one of _predefined_ strings");
+/**
+ * Test invalid 'method' field.
+ */
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: 42}},
+ msg: "$percentile should fail if 'method' field isn't a string"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "fancy"}},
+ msg: "$percentile should fail if 'method' isn't one of _predefined_ strings"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "discrete"}},
+ msg: "$percentile should fail because discrete 'method' isn't supported yet"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "continuous"}},
+ msg: "$percentile should fail because continuous 'method' isn't supported yet"
+});
/**
- * Tests for $median. $median desugars to $percentile with the field p:[0.5] added, and therefore
- * has similar syntax to $percentile.
+ * Tests for invalid $median.
*/
+assertInvalidSyntax({
+ pSpec: {$median: {p: [0.5], input: "$x", method: "approximate"}},
+ msg: "$median should fail if 'p' is defined"
+});
-assertInvalidSyntax({$median: {p: [0.5], input: "$x", method: "approximate"}},
- "Should fail if 'p' is defined");
+assertInvalidSyntax({
+ pSpec: {$median: {method: "approximate"}},
+ msg: "$median should fail if 'input' field is missing"
+});
+
+assertInvalidSyntax(
+ {pSpec: {$median: {input: "$x"}}, msg: "Median should fail if 'method' field is missing"});
-assertInvalidSyntax({$median: {method: "approximate"}},
- "Should fail if $median is missing 'input' field");
+assertInvalidSyntax({
+ pSpec: {$median: {input: "$x", method: "approximate", extras: 42}},
+ msg: "$median should fail if there is an unexpected field"
+});
-assertInvalidSyntax({$median: {input: "$x"}}, "Should fail if $median is missing 'method' field");
+assertInvalidSyntax({
+ pSpec: {$median: {input: "$x", method: "fancy"}},
+ msg: "$median should fail if 'method' isn't one of the _predefined_ strings"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: "$x", method: "discrete"}},
+ msg: "$median should fail because discrete 'method' isn't supported yet"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: "$x", method: "continuous"}},
+ msg: "$median should fail because continuous 'method' isn't supported yet"
+});
-assertInvalidSyntax({$median: {input: "$x", method: "approximate", extras: 42}},
- "Should fail if $median contains an unexpected field");
/**
* Test that valid $percentile specifications are accepted. The results, i.e. semantics, are tested
* elsewhere and would cover all of the cases below, we are providing them here nonetheless for
* completeness.
*/
-function assertValidSyntax(percentileSpec, msg) {
- assert.commandWorked(
- coll.runCommand("aggregate",
- {pipeline: [{$group: {_id: null, p: percentileSpec}}], cursor: {}}),
- msg);
-}
-
-assertValidSyntax(
- {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: "$x", method: "approximate"}},
- "Should be able to specify an array of percentiles");
-
-assertValidSyntax(
- {$percentile: {p: [0.5, 0.9], input: {$divide: ["$x", 2]}, method: "approximate"}},
- "Should be able to specify 'input' as an expression");
-
-assertValidSyntax({$percentile: {p: [0.5, 0.9], input: "x", method: "approximate"}},
- "Non-numeric inputs should be gracefully ignored");
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: "$x", method: "approximate"}},
+ msg: "Should be able to specify an array of percentiles"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.9], input: {$divide: ["$x", 2]}, method: "approximate"}},
+ msg: "Should be able to specify 'input' as an expression"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.9], input: "x", method: "approximate"}},
+ msg: "Non-numeric inputs should be gracefully ignored"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.9], input: {$add: [2, "$x"]}, method: "approximate"}},
+ msg: "'input' should be able to use expressions"
+});
+
+assertValidSyntax({
+ pSpec: {
+ $percentile: {p: [0.5, 0.9], input: {$concatArrays: [[2], ["$x"]]}, method: "approximate"}
+ },
+ msg: "'input' should be able to use expressions even if the result of their eval is non-numeric"
+});
+
+assertValidSyntax({
+ pSpec: {
+ $percentile:
+ {p: {$concatArrays: [[0.01, 0.1], [0.9, 0.99]]}, input: "$x", method: "approximate"}
+ },
+ msg: "'p' should be able to use expressions that evaluate to an array"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [{$add: [0.1, 0.5]}], input: "$x", method: "approximate"}},
+ msg: "'p' should be able to use expressions for the array elements"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}},
+ letSpec: {pvals: [0.5, 0.9]},
+ msg: "'p' should be able to use variables for the array"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: ["$$p1", "$$p2"], input: "$x", method: "approximate"}},
+ letSpec: {p1: 0.5, p2: 0.9},
+ msg: "'p' should be able to use variables for the array elements"
+});
/**
- * Tests for $median. $median desugars to $percentile with the field p:[0.5] added.
+ * Tests for valid $median.
*/
-
-assertValidSyntax({$median: {input: "$x", method: "approximate"}}, "Simple base case for $median.");
+assertValidSyntax(
+ {pSpec: {$median: {input: "$x", method: "approximate"}}, msg: "Simple base case for $median."});
})();
diff --git a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js
index d0844a4acacae..4e33a4c42b215 100644
--- a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js
+++ b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js
@@ -4,6 +4,8 @@
(function() {
"use strict";
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
+
const coll = db[jsTestName()];
coll.drop();
@@ -392,4 +394,17 @@ const testOperatorText = (op) => {
// most relevant results first.
testOperatorText("$bottomN");
testOperatorText("$topN");
+
+// Test constant output and sortBy.
+assert(coll.drop());
+assert.commandWorked(coll.insertMany([{a: 1}, {a: 2}, {a: 3}]));
+const testConstantOutputAndSort = (op) => {
+ const results =
+ coll.aggregate([{$group: {_id: null, result: {[op]: {n: 3, output: "abc", sortBy: {}}}}}])
+ .toArray();
+ assert.eq(results.length, 1, results);
+ assert.docEq(results[0], {_id: null, result: ["abc", "abc", "abc"]}, results);
+};
+testConstantOutputAndSort("$topN");
+testConstantOutputAndSort("$bottomN");
})();
diff --git a/jstests/aggregation/add_with_date.js b/jstests/aggregation/add_with_date.js
index 4d76a6908d7cd..00b4a3ddb2b34 100644
--- a/jstests/aggregation/add_with_date.js
+++ b/jstests/aggregation/add_with_date.js
@@ -1,9 +1,5 @@
// Test $add with date
-(function() {
-"use strict";
-
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const coll = db.getSiblingDB(jsTestName()).coll;
coll.drop();
@@ -132,5 +128,4 @@ assert.eq(ISODate("2019-01-30T07:30:10.958Z"), getResultOfExpression({
"$doubleVal",
NumberLong("-2397083434877565864")
]
- }));
-}());
+ }));
\ No newline at end of file
diff --git a/jstests/aggregation/api_version_stage_allowance_checks.js b/jstests/aggregation/api_version_stage_allowance_checks.js
index 3e6ec804b69a8..50b40ce4a3a30 100644
--- a/jstests/aggregation/api_version_stage_allowance_checks.js
+++ b/jstests/aggregation/api_version_stage_allowance_checks.js
@@ -140,18 +140,6 @@ result = testDB.runCommand({
});
assert.commandWorked(result);
-// Tests that the internal '$_generateV2ResumeTokens' option does not fail with 'apiStrict: true'.
-result = testDB.runCommand({
- aggregate: collName,
- pipeline: [{$project: {_id: 0}}],
- cursor: {},
- writeConcern: {w: "majority"},
- $_generateV2ResumeTokens: false,
- apiVersion: "1",
- apiStrict: true
-});
-assert.commandWorked(result);
-
// Tests that time-series collection can be queried (invoking $_internalUnpackBucket stage)
// from an external client with 'apiStrict'.
(function testInternalUnpackBucketAllowance() {
diff --git a/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js b/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js
index 09d2239389a6c..135b33e794f56 100644
--- a/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js
+++ b/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js
@@ -33,4 +33,9 @@ assert.eq(
{$sort: {_id: 1}}
])
.toArray());
+
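+// Excluding the dotted sub-field "c.z" should drop only that sub-field and must not change
+// the relative order of the remaining fields, either inside "c" or at the top level.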
+assert.commandWorked(coll.insert({_id: 4, c: {y: 11, z: 22, a: 33}, a: 1}));
+
+assert.eq([{_id: 1}, {_id: 2, c: 1}, {_id: 3, y: 1, z: 1}, {_id: 4, c: {y: 11, a: 33}, a: 1}],
+ coll.aggregate([{$project: {"c.z": 0}}, {$sort: {_id: 1}}]).toArray());
}());
diff --git a/jstests/aggregation/bugs/groupMissing.js b/jstests/aggregation/bugs/groupMissing.js
index 2bbad19baafba..d54d5a7cc05b4 100644
--- a/jstests/aggregation/bugs/groupMissing.js
+++ b/jstests/aggregation/bugs/groupMissing.js
@@ -6,8 +6,6 @@
// covered, which will not happen if the $sort is within a $facet stage.
// @tags: [
// do_not_wrap_aggregations_in_facets,
-// # TODO SERVER-67550: Equality to null does not match undefined in CQF.
-// cqf_incompatible,
// ]
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
@@ -83,20 +81,28 @@ coll.insert({a: 1});
let collScanResult = coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray();
assertArrayEq({actual: collScanResult, expected: [{"a": 1, "b": null}, {"a": 1}]});
-// After creating the index, the plan will use PROJECTION_COVERED, and the index will incorrectly
-// provide a null for the missing "b" value.
-coll.createIndex({a: 1, b: 1});
-// Assert that the bug SERVER-23229 is still present.
-assertArrayEq({
- actual: coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray(),
- expected: [{"a": 1, "b": null}, {"a": 1, "b": null}]
-});
-// Correct behavior after SERVER-23229 is fixed.
-if (0) {
- assertArrayEq({
- actual: coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray(),
- expected: collScanResult
- });
+// After creating the index, the classic plan will use PROJECTION_COVERED, and the index will
+// incorrectly provide a null for the missing "b" value. Bonsai does not exhibit SERVER-23229. So,
+// either the new engine is used and the correct results (collScanResult) are seen, or we see the
+// incorrect result, where all values of "b" are null.
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+const possibleResults = [collScanResult, [{"a": 1, "b": null}, {"a": 1, "b": null}]];
+
+function checkActualMatchesAnExpected(actual) {
+ let foundMatch = false;
+ for (let i = 0; i < possibleResults.length; i++) {
+ foundMatch |= arrayEq(actual, possibleResults[i]);
+ }
+ assert(foundMatch,
+ `Expected actual results to match one of the possible results. actual=${
+ tojson(actual)}, possibleResults=${tojson(possibleResults)}`);
}
+
+// Check behavior with and without a hint.
+checkActualMatchesAnExpected(
+ coll.aggregate([{$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}]).toArray());
+checkActualMatchesAnExpected(
+ coll.aggregate([{$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}], {hint: {a: 1, b: 1}})
+ .toArray());
}());
diff --git a/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js b/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js
new file mode 100644
index 0000000000000..de8f79bbea7b7
--- /dev/null
+++ b/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js
@@ -0,0 +1,74 @@
+// Regression test to check that different document sizes work correctly with $lookup.
+// @tags: [
+// requires_fcv_71,
+// ]
+(function() {
+'use strict';
+
+load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
+
+const localColl = db.lookup_spill_local;
+const foreignColl = db.lookup_spill_foreign;
+localColl.drop();
+foreignColl.drop();
+
+const memoryLimit = 128; // Spill at 128 bytes
+
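+// Lower the SBE hash-lookup spill threshold on every primary so the lookup spills
+// regardless of the fixture topology.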
+function setHashLookupMemoryLimit(memoryLimit) {
+ const commandResArr = FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {
+ setParameter: 1,
+ internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill: memoryLimit,
+ }
+ });
+ assert.gt(commandResArr.length, 0, "Setting memory limit on primaries failed");
+ assert.commandWorked(commandResArr[0]);
+}
+
+function runHashLookupSpill() {
+ const smallStr = "small";
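+ // Array(memoryLimit).toString() yields a string of (memoryLimit - 1) commas, so every
+ // "big" document on its own exceeds the spill threshold.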
+ const bigStr = Array(memoryLimit).toString();
+ const localDoc = {_id: 1, a: 2};
+ const foreignDocs = [
+ {_id: 0, b: 1, padding: smallStr},
+ {_id: 1, b: 2, padding: bigStr},
+ {_id: 2, b: 1, padding: smallStr},
+ {_id: 3, b: 2, padding: bigStr},
+ {_id: 4, b: 1, padding: smallStr},
+ {_id: 5, b: 2, padding: bigStr},
+ {_id: 6, b: 1, padding: smallStr},
+ {_id: 7, b: 2, padding: bigStr},
+ {_id: 8, b: 1, padding: smallStr},
+ ];
+
+ assert.commandWorked(localColl.insert(localDoc));
+ assert.commandWorked(foreignColl.insertMany(foreignDocs));
+ const pipeline = [
+ {$lookup: {from: foreignColl.getName(), localField: "a", foreignField: "b", as: "matched"}},
+ {$sort: {_id: 1}}
+ ];
+
+ const result = localColl.aggregate(pipeline).toArray();
+ assert.eq(result.length, 1, result);
+ assert.eq(result[0].matched.length, 4, result);
+ for (let matched of result[0].matched) {
+ assert.eq(matched.padding, bigStr);
+ }
+}
+
+const oldMemoryLimit =
+ assert
+ .commandWorked(db.adminCommand({
+ getParameter: 1,
+ internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill: 1
+ }))
+ .internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill;
+
+try {
+ setHashLookupMemoryLimit(memoryLimit);
+ runHashLookupSpill();
+} finally {
+ setHashLookupMemoryLimit(oldMemoryLimit);
+}
+})();
diff --git a/jstests/aggregation/bugs/optimize_text.js b/jstests/aggregation/bugs/optimize_text.js
index 9dcede0c57a1c..6909003c392f9 100644
--- a/jstests/aggregation/bugs/optimize_text.js
+++ b/jstests/aggregation/bugs/optimize_text.js
@@ -4,10 +4,7 @@
// # because the shard doesn't know whether the merger needs the textScore metadata.
// assumes_unsharded_collection,
// ]
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
+import {planHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.optimize_text;
assert.commandWorked(coll.createIndex({"$**": "text"}));
@@ -37,5 +34,4 @@ assert(!planHasStage(db, aggExplain, 'TEXT_OR'), aggExplain);
// Non-blocking $text plans with just one search term do not need an OR stage, as a further
// optimization.
assert(!planHasStage(db, findSingleTermExplain, 'OR'), findSingleTermExplain);
-assert(!planHasStage(db, findSingleTermExplain, 'TEXT_OR'), findSingleTermExplain);
-})();
+assert(!planHasStage(db, findSingleTermExplain, 'TEXT_OR'), findSingleTermExplain);
\ No newline at end of file
diff --git a/jstests/aggregation/bugs/server14670.js b/jstests/aggregation/bugs/server14670.js
index adadb154da030..c8422cf9be7c0 100644
--- a/jstests/aggregation/bugs/server14670.js
+++ b/jstests/aggregation/bugs/server14670.js
@@ -1,21 +1,49 @@
-// SERVER-14670 introduced the $strLenBytes and $strLenCP aggregation expressions. In this file, we
-// test the error cases for these expressions.
-load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+/**
+ * SERVER-14670 introduced the $strLenBytes and $strLenCP aggregation expressions. In this file, we
+ * test their expected behaviour.
+ */
(function() {
"use strict";
-var coll = db.substr;
-coll.drop();
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
-// Need an empty document for the pipeline.
-coll.insert({});
+var coll = db.substr;
+assert(coll.drop());
-assertErrorCode(coll,
- [{$project: {strLen: {$strLenBytes: 1}}}],
- 34473,
- "$strLenBytes requires a string argument.");
+assert.commandWorked(
+ coll.insert({strField: "MyString", intField: 1, nullField: null, specialCharField: "é"}));
assertErrorCode(
coll, [{$project: {strLen: {$strLenCP: 1}}}], 34471, "$strLenCP requires a string argument.");
+
+assert.eq({"strLen": 8},
+ coll.aggregate({$project: {_id: 0, strLen: {$strLenBytes: "$strField"}}}).toArray()[0]);
+
+assertErrorCode(coll,
+ [{$project: {strLen: {$strLenBytes: "$intField"}}}],
+ 5155800,
+ "$strLenBytes requires a string argument");
+
+assertErrorCode(coll,
+ [{$project: {strLen: {$strLenBytes: "$nullField"}}}],
+ 5155800,
+ "$strLenBytes requires a string argument");
+
+assertErrorCode(coll,
+ [{$project: {strLen: {$strLenBytes: "$b"}}}],
+ 5155800,
+ "$strLenBytes requires a string argument");
+
+// Checks that strLenBytes and strLenCP return different values for multi-byte characters.
+assert.eq({"strLenBytes": 2, "strLenCP": 1},
+ coll.aggregate({
+ $project: {
+ _id: 0,
+ strLenBytes: {$strLenBytes: "$specialCharField"},
+ strLenCP: {$strLenCP: "$specialCharField"}
+ }
+ })
+ .toArray()[0]);
}());
diff --git a/jstests/aggregation/bugs/server22093.js b/jstests/aggregation/bugs/server22093.js
index 1adc279571d75..65430a4673ff0 100644
--- a/jstests/aggregation/bugs/server22093.js
+++ b/jstests/aggregation/bugs/server22093.js
@@ -11,10 +11,7 @@
// assumes_unsharded_collection,
// do_not_wrap_aggregations_in_facets,
// ]
-load('jstests/libs/analyze_plan.js');
-
-(function() {
-"use strict";
+import {aggPlanHasStage, getAggPlanStage, planHasStage} from "jstests/libs/analyze_plan.js";
var coll = db.countscan;
coll.drop();
@@ -32,10 +29,20 @@ var simpleGroup = coll.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).toAr
assert.eq(simpleGroup.length, 1);
assert.eq(simpleGroup[0]["count"], 15);
+// Retrieve the query plan from explain, whose shape varies depending on the query and the
+// engines used (classic/sbe).
+const getQueryPlan = function(explain) {
+ if (explain.stages) {
+ explain = explain.stages[0].$cursor;
+ }
+ let winningPlan = explain.queryPlanner.winningPlan;
+ return winningPlan.queryPlan ? winningPlan.queryPlan : winningPlan;
+};
+
var explained =
coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
-assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN"));
explained = coll.explain().aggregate([
{$match: {foo: {$gt: 0}}},
@@ -43,15 +50,15 @@ explained = coll.explain().aggregate([
{$group: {_id: null, count: {$sum: 1}}}
]);
-assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN"));
// Make sure a $count stage can use the COUNT_SCAN optimization.
explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$count: "count"}]);
-assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN"));
// A $match that is not a single range cannot use the COUNT_SCAN optimization.
explained = coll.explain().aggregate([{$match: {foo: {$in: [0, 1]}}}, {$count: "count"}]);
-assert(!planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
+assert(!planHasStage(db, getQueryPlan(explained), "COUNT_SCAN"));
// Test that COUNT_SCAN can be used when there is a $sort.
explained = coll.explain().aggregate([{$sort: {foo: 1}}, {$count: "count"}]);
@@ -67,8 +74,8 @@ assert.eq(true, countScan.indexBounds.startKeyInclusive, explained);
assert.eq({foo: MaxKey}, countScan.indexBounds.endKey, explained);
assert.eq(true, countScan.indexBounds.endKeyInclusive, explained);
-// Test that the inclusivity/exclusivity of the index bounds for COUNT_SCAN are correct when there
-// is a $sort in the opposite direction of the index.
+// Test that the inclusivity/exclusivity of the index bounds for COUNT_SCAN are correct when
+// there is a $sort in the opposite direction of the index.
explained = coll.explain().aggregate(
[{$match: {foo: {$gte: 0, $lt: 10}}}, {$sort: {foo: -1}}, {$count: "count"}]);
countScan = getAggPlanStage(explained, "COUNT_SCAN");
@@ -77,4 +84,3 @@ assert.eq({foo: 0}, countScan.indexBounds.startKey, explained);
assert.eq(true, countScan.indexBounds.startKeyInclusive, explained);
assert.eq({foo: 10}, countScan.indexBounds.endKey, explained);
assert.eq(false, countScan.indexBounds.endKeyInclusive, explained);
-}());
diff --git a/jstests/aggregation/bugs/server4638.js b/jstests/aggregation/bugs/server4638.js
index ee6f7cfd6df1e..1b0fe18f0dd2a 100644
--- a/jstests/aggregation/bugs/server4638.js
+++ b/jstests/aggregation/bugs/server4638.js
@@ -1,13 +1,13 @@
// SERVER-4638 - this tests explicit undefined values
// This case is marked as a dup of SERVER-4674
-t = db.server4638;
+let t = db.server4638;
t.drop();
t.insert({_id: 0, x: 0, undef: undefined});
// Make sure having an undefined doesn't break pipelines not using the field
-res = t.aggregate({$project: {x: 1}}).toArray();
+let res = t.aggregate({$project: {x: 1}}).toArray();
assert.eq(res[0].x, 0);
// Make sure having an undefined doesn't break pipelines that do use the field
diff --git a/jstests/aggregation/bugs/server4656.js b/jstests/aggregation/bugs/server4656.js
index 185f74bec548c..f0451a5c3f548 100644
--- a/jstests/aggregation/bugs/server4656.js
+++ b/jstests/aggregation/bugs/server4656.js
@@ -3,7 +3,7 @@
var c = db.c;
c.drop();
-NUM_OBJS = 100;
+let NUM_OBJS = 100;
var randoms = {};
function generateRandom() {
diff --git a/jstests/aggregation/bugs/server4738.js b/jstests/aggregation/bugs/server4738.js
index 7a482ab0042bb..e1ad3fead386d 100644
--- a/jstests/aggregation/bugs/server4738.js
+++ b/jstests/aggregation/bugs/server4738.js
@@ -1,5 +1,5 @@
// test to make sure we accept all numeric types for inclusion
-c = db.blah;
+let c = db.blah;
c.drop();
c.save({key: 4, v: 3, x: 2});
diff --git a/jstests/aggregation/bugs/server6120.js b/jstests/aggregation/bugs/server6120.js
index c66b296a5a7a7..d6ca0a129fdd1 100644
--- a/jstests/aggregation/bugs/server6120.js
+++ b/jstests/aggregation/bugs/server6120.js
@@ -1,6 +1,6 @@
// Value::coerceToBool() is consistent with BSONElement::trueValue(). SERVER-6120
-t = db.jstests_aggregation_server6120;
+let t = db.jstests_aggregation_server6120;
t.drop();
t.save({object: {a: 1}});
diff --git a/jstests/aggregation/bugs/server6125.js b/jstests/aggregation/bugs/server6125.js
index 6414a2eae4581..00e6c9530fe2a 100644
--- a/jstests/aggregation/bugs/server6125.js
+++ b/jstests/aggregation/bugs/server6125.js
@@ -10,7 +10,7 @@
// to make results array nested (problem 2)
function nestArray(nstArray) {
- for (x = 0; x < nstArray.length; x++) {
+ for (let x = 0; x < nstArray.length; x++) {
nstArray[x].a = {b: nstArray[x].a};
}
}
diff --git a/jstests/aggregation/bugs/server6131.js b/jstests/aggregation/bugs/server6131.js
index 640eea2723e17..67f662e10101c 100644
--- a/jstests/aggregation/bugs/server6131.js
+++ b/jstests/aggregation/bugs/server6131.js
@@ -1,6 +1,6 @@
// $unwind applied to an empty array field drops the field from the source document. SERVER-6131
-t = db.jstests_aggregation_server6131;
+let t = db.jstests_aggregation_server6131;
t.drop();
function assertAggregationResults(expected, aggregation) {
diff --git a/jstests/aggregation/bugs/server6181.js b/jstests/aggregation/bugs/server6181.js
index d48a5dbfe02b2..d894962015665 100644
--- a/jstests/aggregation/bugs/server6181.js
+++ b/jstests/aggregation/bugs/server6181.js
@@ -1,11 +1,11 @@
// SERVER-6181 Correctly support an expression for _id
-c = db.c;
+let c = db.c;
c.drop();
c.save({a: 2});
-res = c.aggregate({$project: {_id: '$a'}});
+let res = c.aggregate({$project: {_id: '$a'}});
assert.eq(res.toArray(), [{_id: 2}]);
res = c.aggregate({$project: {_id: {$add: [1, '$a']}}});
diff --git a/jstests/aggregation/bugs/server6184.js b/jstests/aggregation/bugs/server6184.js
index bc2ce8c0f675f..8e18e3efb46bc 100644
--- a/jstests/aggregation/bugs/server6184.js
+++ b/jstests/aggregation/bugs/server6184.js
@@ -1,13 +1,13 @@
// SERVER-6184 Support mixing nested and dotted fields with common prefixes
// @tags: [tests_projection_field_order]
-c = db.c;
+let c = db.c;
c.drop();
c.save({a: 'missing', b: {c: 'bar', a: 'baz', z: 'not there'}});
function test(projection) {
- res = c.aggregate({$project: projection});
+ let res = c.aggregate({$project: projection});
assert.eq(res.toArray()[0], {b: {c: 'bar', a: 'baz'}});
}
@@ -16,8 +16,6 @@ test({_id: 0, 'b.c': 1, b: {a: 1}});
// Synthetic fields should be in the order they appear in the $project
-one = {
- $add: [1]
-};
-res = c.aggregate({$project: {_id: 0, 'A.Z': one, A: {Y: one, A: one}, 'A.B': one}});
+let one = {$add: [1]};
+let res = c.aggregate({$project: {_id: 0, 'A.Z': one, A: {Y: one, A: one}, 'A.B': one}});
assert.eq(res.toArray()[0], {A: {Z: 1, Y: 1, A: 1, B: 1}});
diff --git a/jstests/aggregation/bugs/server6186.js b/jstests/aggregation/bugs/server6186.js
index 391643b1d2493..58bb1afcce2cd 100644
--- a/jstests/aggregation/bugs/server6186.js
+++ b/jstests/aggregation/bugs/server6186.js
@@ -1,6 +1,6 @@
// $substr returns an empty string if the position argument is out of bounds. SERVER-6186
-t = db.jstests_aggregation_server6186;
+let t = db.jstests_aggregation_server6186;
t.drop();
t.save({});
@@ -23,8 +23,8 @@ function assertSubstr(string, pos, n) {
}
function checkVariousSubstrings(string) {
- for (pos = 0; pos < 5; ++pos) {
- for (n = -2; n < 7; ++n) {
+ for (let pos = 0; pos < 5; ++pos) {
+ for (let n = -2; n < 7; ++n) {
assertSubstr(string, pos, n);
}
}
diff --git a/jstests/aggregation/bugs/server6189.js b/jstests/aggregation/bugs/server6189.js
index 13385aa0443ee..d13417da6462b 100644
--- a/jstests/aggregation/bugs/server6189.js
+++ b/jstests/aggregation/bugs/server6189.js
@@ -1,6 +1,6 @@
// server6189 - Support date operators with dates before 1970
-c = db.c;
+let c = db.c;
function test(date, testSynthetics) {
print("testing " + date);
diff --git a/jstests/aggregation/bugs/server6190.js b/jstests/aggregation/bugs/server6190.js
index 1eebb5af73c22..b750f4b2aaa76 100644
--- a/jstests/aggregation/bugs/server6190.js
+++ b/jstests/aggregation/bugs/server6190.js
@@ -3,7 +3,7 @@
load('jstests/aggregation/extras/utils.js');
load("jstests/libs/sbe_assert_error_override.js");
-t = db.jstests_aggregation_server6190;
+let t = db.jstests_aggregation_server6190;
t.drop();
t.save({});
diff --git a/jstests/aggregation/bugs/server6192_server6193.js b/jstests/aggregation/bugs/server6192_server6193.js
index 114a967598799..e38b03cdd86a3 100644
--- a/jstests/aggregation/bugs/server6192_server6193.js
+++ b/jstests/aggregation/bugs/server6192_server6193.js
@@ -11,10 +11,7 @@
// do_not_wrap_aggregations_in_facets,
// requires_pipeline_optimization,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'getPlanStage'.
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const t = db.jstests_aggregation_server6192;
t.drop();
@@ -71,5 +68,4 @@ assertNotOptimized({$or: [0, '$x']});
assertNotOptimized({$and: ['$x', '$x']});
assertNotOptimized({$or: ['$x', '$x']});
assertNotOptimized({$and: ['$x']});
-assertNotOptimized({$or: ['$x']});
-}());
+assertNotOptimized({$or: ['$x']});
\ No newline at end of file
diff --git a/jstests/aggregation/bugs/server6194.js b/jstests/aggregation/bugs/server6194.js
index 53c23f60c1f48..731735d7a68c2 100644
--- a/jstests/aggregation/bugs/server6194.js
+++ b/jstests/aggregation/bugs/server6194.js
@@ -1,11 +1,9 @@
// make sure $concat doesn't optimize constants to the end
-c = db.c;
+let c = db.c;
c.drop();
c.save({x: '3'});
-project = {
- $project: {a: {$concat: ['1', {$concat: ['foo', '$x', 'bar']}, '2']}}
-};
+let project = {$project: {a: {$concat: ['1', {$concat: ['foo', '$x', 'bar']}, '2']}}};
assert.eq('1foo3bar2', c.aggregate(project).toArray()[0].a);
diff --git a/jstests/aggregation/bugs/server6195.js b/jstests/aggregation/bugs/server6195.js
index 156489e9c374a..e007429fe58e4 100644
--- a/jstests/aggregation/bugs/server6195.js
+++ b/jstests/aggregation/bugs/server6195.js
@@ -3,7 +3,7 @@
load('jstests/aggregation/extras/utils.js');
load('jstests/libs/sbe_assert_error_override.js'); // Override error-code-checking APIs.
-c = db.s6570;
+let c = db.s6570;
c.drop();
c.save({v: "$", w: ".", x: "foo", y: "bar", z: "z\0z"});
diff --git a/jstests/aggregation/bugs/server6232.js b/jstests/aggregation/bugs/server6232.js
index 21ed599af7345..73ab0e50317c4 100644
--- a/jstests/aggregation/bugs/server6232.js
+++ b/jstests/aggregation/bugs/server6232.js
@@ -5,7 +5,7 @@ db.s6232.drop();
db.s6232.save({});
// case where an empty object is evaluated
-result = db.s6232.aggregate({$project: {a: {$and: [{}]}}});
+let result = db.s6232.aggregate({$project: {a: {$and: [{}]}}});
assert.eq(result.toArray()[0].a, true);
// case where result should contain a new empty object
diff --git a/jstests/aggregation/bugs/server6238.js b/jstests/aggregation/bugs/server6238.js
index ddc29ec33d8b1..90192cb626369 100644
--- a/jstests/aggregation/bugs/server6238.js
+++ b/jstests/aggregation/bugs/server6238.js
@@ -1,7 +1,7 @@
// do not allow creation of fields with a $ prefix
load('jstests/aggregation/extras/utils.js');
-c = db.c;
+const c = db.c;
c.drop();
c.insert({a: 1});
diff --git a/jstests/aggregation/bugs/server6269.js b/jstests/aggregation/bugs/server6269.js
index c92245f619836..b6fb2f809cc8b 100644
--- a/jstests/aggregation/bugs/server6269.js
+++ b/jstests/aggregation/bugs/server6269.js
@@ -1,6 +1,6 @@
// Correct skipping behavior when $skip is applied after $unwind. SERVER-6269
-c = db.jstests_aggregation_server6269;
+let c = db.jstests_aggregation_server6269;
c.drop();
c.save({_id: 0, a: [1, 2, 3]});
diff --git a/jstests/aggregation/bugs/server6275.js b/jstests/aggregation/bugs/server6275.js
index 39feeb2552ee0..dccba755d5073 100644
--- a/jstests/aggregation/bugs/server6275.js
+++ b/jstests/aggregation/bugs/server6275.js
@@ -1,5 +1,5 @@
// confirm that undefined no longer counts as 0 in $avg
-c = db.c;
+let c = db.c;
c.drop();
c.save({a: 1});
c.save({a: 4});
diff --git a/jstests/aggregation/bugs/server6468.js b/jstests/aggregation/bugs/server6468.js
index 09515c746fafb..ab5a89d5ca2e4 100644
--- a/jstests/aggregation/bugs/server6468.js
+++ b/jstests/aggregation/bugs/server6468.js
@@ -1,11 +1,11 @@
// SERVER-6468 nested and dotted projections should be treated the same
-c = db.c;
+let c = db.c;
c.drop();
c.save({a: 'foo', b: {c: 'bar', z: 'not there'}});
function test(projection) {
- res = c.aggregate({$project: projection});
+ let res = c.aggregate({$project: projection});
assert.eq(res.toArray()[0], {b: {c: 'bar'}});
}
diff --git a/jstests/aggregation/bugs/server6531.js b/jstests/aggregation/bugs/server6531.js
index 5f5ebee3836d6..cc6d05079c9c4 100644
--- a/jstests/aggregation/bugs/server6531.js
+++ b/jstests/aggregation/bugs/server6531.js
@@ -1,6 +1,6 @@
// SERVER-6531 support $within in $match aggregation operations
-c = db.s6531;
+let c = db.s6531;
c.drop();
for (var x = 0; x < 10; x++) {
@@ -10,10 +10,10 @@ for (var x = 0; x < 10; x++) {
}
function test(variant) {
- query = {loc: {$within: {$center: [[5, 5], 3]}}};
- sort = {_id: 1};
- aggOut = c.aggregate({$match: query}, {$sort: sort});
- cursor = c.find(query).sort(sort);
+ let query = {loc: {$within: {$center: [[5, 5], 3]}}};
+ let sort = {_id: 1};
+ let aggOut = c.aggregate({$match: query}, {$sort: sort});
+ let cursor = c.find(query).sort(sort);
assert.eq(aggOut.toArray(), cursor.toArray());
}
diff --git a/jstests/aggregation/bugs/server6556.js b/jstests/aggregation/bugs/server6556.js
index a6d1a0c483765..ef31a8111d1a8 100644
--- a/jstests/aggregation/bugs/server6556.js
+++ b/jstests/aggregation/bugs/server6556.js
@@ -1,6 +1,6 @@
// ensure strings containing null characters dont end at that null
-c = db.s6556;
+let c = db.s6556;
c.drop();
c.save({foo: "as\0df"});
diff --git a/jstests/aggregation/bugs/server6570.js b/jstests/aggregation/bugs/server6570.js
index b12a83967bab0..c423a1c60c758 100644
--- a/jstests/aggregation/bugs/server6570.js
+++ b/jstests/aggregation/bugs/server6570.js
@@ -2,7 +2,7 @@
load('jstests/aggregation/extras/utils.js');
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
-c = db.s6570;
+let c = db.s6570;
c.drop();
c.save({x: 17, y: "foo"});
diff --git a/jstests/aggregation/bugs/server6861.js b/jstests/aggregation/bugs/server6861.js
index e6748bd07da68..ee012e705c941 100644
--- a/jstests/aggregation/bugs/server6861.js
+++ b/jstests/aggregation/bugs/server6861.js
@@ -2,7 +2,7 @@
// SERVER-6861
load('jstests/aggregation/extras/utils.js');
-t = db.jstests_server6861;
+let t = db.jstests_server6861;
t.drop();
t.save({a: 1});
diff --git a/jstests/aggregation/bugs/server72651.js b/jstests/aggregation/bugs/server72651.js
index b4100bdc32c4f..e1f0b57ffcd1f 100644
--- a/jstests/aggregation/bugs/server72651.js
+++ b/jstests/aggregation/bugs/server72651.js
@@ -9,4 +9,4 @@ assert.commandWorked(c.insert({_id: 0, a: 1}));
assert.eq(
[],
c.aggregate([{$project: {"b": 1}}, {$match: {$expr: {$getField: {$literal: "a"}}}}]).toArray());
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/bugs/server75670.js b/jstests/aggregation/bugs/server75670.js
index 932c04d7b4db8..60bdcbb48d265 100644
--- a/jstests/aggregation/bugs/server75670.js
+++ b/jstests/aggregation/bugs/server75670.js
@@ -27,4 +27,4 @@ assert(resultsEq(
])
.toArray(),
));
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/bugs/server7768.js b/jstests/aggregation/bugs/server7768.js
index b7ce2669e3adb..e33739ea2416f 100644
--- a/jstests/aggregation/bugs/server7768.js
+++ b/jstests/aggregation/bugs/server7768.js
@@ -20,4 +20,4 @@ let res = db.runCommand({
assert.commandWorked(res);
assert.eq(res.cursor.firstBatch, [{foo: 1}]);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/aggregation/bugs/server7900.js b/jstests/aggregation/bugs/server7900.js
index 20bf085c7a1d9..103ebd0b7260d 100644
--- a/jstests/aggregation/bugs/server7900.js
+++ b/jstests/aggregation/bugs/server7900.js
@@ -1,10 +1,10 @@
// server-7900 - $sort + $limit ignores limit when using index for sort
-c = db.s7900;
+let c = db.s7900;
c.drop();
for (var i = 0; i < 5; i++)
c.insert({_id: i});
-res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort
+let res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort
assert.eq(res.toArray(), [{_id: 4}, {_id: 3}]);
diff --git a/jstests/aggregation/bugs/server8581.js b/jstests/aggregation/bugs/server8581.js
index ae616cb21d316..0045425d39723 100644
--- a/jstests/aggregation/bugs/server8581.js
+++ b/jstests/aggregation/bugs/server8581.js
@@ -1,7 +1,7 @@
// Check $redact pipeline stage.
load('jstests/aggregation/extras/utils.js');
-t = db.jstests_aggregation_redact;
+let t = db.jstests_aggregation_redact;
t.drop();
// this document will always be present but its content will change
@@ -42,15 +42,15 @@ t.save({
level: 4,
});
-a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}});
-a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}});
-a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}});
-a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}});
-a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}});
+let a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}});
+let a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}});
+let a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}});
+let a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}});
+let a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}});
-a1result = [{_id: 1, level: 1, l: {}, o: [], q: 14}];
+let a1result = [{_id: 1, level: 1, l: {}, o: [], q: 14}];
-a2result = [{
+let a2result = [{
_id: 1,
level: 1,
h: {
@@ -61,7 +61,7 @@ a2result = [{
q: 14
}];
-a3result = [{
+let a3result = [{
_id: 1,
level: 1,
b: {
@@ -77,7 +77,7 @@ a3result = [{
q: 14
}];
-a4result = [
+let a4result = [
{
_id: 1,
level: 1,
@@ -97,7 +97,7 @@ a4result = [
}
];
-a5result = [
+let a5result = [
{
_id: 1,
level: 1,
@@ -139,13 +139,13 @@ t.drop();
// entire document should be present at 2 and beyond
t.save({_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9});
-b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}});
-b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}});
-b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}});
+let b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}});
+let b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}});
+let b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}});
-b1result = [];
+let b1result = [];
-b23result = [{_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}];
+let b23result = [{_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}];
assert.eq(b1.toArray(), b1result);
assert.eq(b2.toArray(), b23result);
diff --git a/jstests/aggregation/bugs/skip_limit_overflow.js b/jstests/aggregation/bugs/skip_limit_overflow.js
index 2ca22a0c3e561..e5448bad3df7f 100644
--- a/jstests/aggregation/bugs/skip_limit_overflow.js
+++ b/jstests/aggregation/bugs/skip_limit_overflow.js
@@ -12,10 +12,7 @@
* requires_pipeline_optimization,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStages' and other explain helpers.
+import {aggPlanHasStage, getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.server39788;
coll.drop();
@@ -217,5 +214,4 @@ testPipeline(
$skip: {path: "$skip", expectedValue: [NumberLong("9223372036854775807")]},
SKIP: {path: "skipAmount", expectedValue: [10]}
},
- ["$sort"]);
-})();
+ ["$sort"]);
\ No newline at end of file
diff --git a/jstests/aggregation/bugs/strcasecmp.js b/jstests/aggregation/bugs/strcasecmp.js
index 736e7ec0dac19..44a2668bcc435 100644
--- a/jstests/aggregation/bugs/strcasecmp.js
+++ b/jstests/aggregation/bugs/strcasecmp.js
@@ -1,6 +1,6 @@
// Aggregation $strcasecmp tests.
-t = db.jstests_aggregation_strcasecmp;
+let t = db.jstests_aggregation_strcasecmp;
t.drop();
t.save({});
diff --git a/jstests/aggregation/explain.js b/jstests/aggregation/explain.js
index 6ec350d96e56e..1cba7ca539940 100644
--- a/jstests/aggregation/explain.js
+++ b/jstests/aggregation/explain.js
@@ -1,10 +1,7 @@
// Tests the behavior of explain() when used with the aggregation pipeline.
// - Explain() should not read or modify the plan cache.
// - The result should always include serverInfo.
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getAggPlanStage().
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
let coll = db.explain;
coll.drop();
@@ -34,5 +31,4 @@ assert.eq(null, getAggPlanStage(result, "CACHED_PLAN"));
// that this implementation also includes serverInfo.
result = coll.explain().aggregate([{$lookup: {from: 'other_coll', pipeline: [], as: 'docs'}}]);
assert(result.hasOwnProperty('serverInfo'), result);
-assert.hasFields(result.serverInfo, ['host', 'port', 'version', 'gitVersion']);
-})();
+assert.hasFields(result.serverInfo, ['host', 'port', 'version', 'gitVersion']);
\ No newline at end of file
diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js
index 5e017f9b74e76..171e6f6e2fbf6 100644
--- a/jstests/aggregation/explain_limit.js
+++ b/jstests/aggregation/explain_limit.js
@@ -8,11 +8,8 @@
// # Implicit index creation may change the plan/engine used.
// assumes_no_implicit_index_creation,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
let coll = db.explain_limit;
@@ -100,4 +97,3 @@ checkResults({results: execLevel, verbosity: "executionStats"});
allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline);
checkResults({results: allPlansExecLevel, verbosity: "allPlansExecution"});
-})();
diff --git a/jstests/aggregation/explain_per_stage_exec_stats.js b/jstests/aggregation/explain_per_stage_exec_stats.js
index f7f65121a2dae..4c47ac14bf338 100644
--- a/jstests/aggregation/explain_per_stage_exec_stats.js
+++ b/jstests/aggregation/explain_per_stage_exec_stats.js
@@ -3,10 +3,8 @@
* execution time (executionTimeMillisEstimate) when explain is run with verbosities
* "executionStats" and "allPlansExecution".
*/
-(function() {
-"use strict";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
load("jstests/libs/fixture_helpers.js"); // For isReplSet().
const coll = db.explain_per_stage_exec_stats;
@@ -156,5 +154,4 @@ assert.eq(numberOfDocsReturnedByMatchStage(coll.explain("executionStats").aggreg
}
};
checkResults(result, assertOutputBytesSize);
-})();
-}());
+})();
\ No newline at end of file
diff --git a/jstests/aggregation/explain_writing_aggs.js b/jstests/aggregation/explain_writing_aggs.js
index 67b71d017fd07..9a74584c2c371 100644
--- a/jstests/aggregation/explain_writing_aggs.js
+++ b/jstests/aggregation/explain_writing_aggs.js
@@ -8,11 +8,8 @@
* assumes_write_concern_unchanged,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos().
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos().
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
let sourceColl = db.explain_writing_aggs_source;
@@ -71,5 +68,4 @@ withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) {
assert.eq(mergeExplain.$merge.whenNotMatched, whenNotMatchedMode, mergeExplain);
assert.eq(mergeExplain.$merge.on, "_id", mergeExplain);
assert.eq(targetColl.find().itcount(), 0, explain);
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/aggregation/expressions/add.js b/jstests/aggregation/expressions/add.js
index cc074a9ae08d4..c411c3f213bab 100644
--- a/jstests/aggregation/expressions/add.js
+++ b/jstests/aggregation/expressions/add.js
@@ -1,5 +1,7 @@
(function() {
"use strict";
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains.
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
// In SERVER-63012, translation of $add expression into sbe now defaults the translation of $add
// with no operands to a zero integer constant.
@@ -43,4 +45,49 @@ let addResult = coll.aggregate([{$project: {add: {$add: queryArr}}}]).toArray();
let sumResult = coll.aggregate([{$project: {sum: {$sum: queryArr}}}]).toArray();
assert.neq(addResult[0]["add"], sumResult[0]["sum"]);
assert.eq(addResult[0]["add"], arr.reduce((a, b) => a + b));
+
+assert.eq(true, coll.drop());
+// Doubles are rounded to int64 when added to a Date.
+assert.commandWorked(coll.insert({_id: 0, lhs: new Date(1683794065002), rhs: 0.5}));
+assert.commandWorked(coll.insert({_id: 1, lhs: new Date(1683794065002), rhs: 1.4}));
+assert.commandWorked(coll.insert({_id: 2, lhs: new Date(1683794065002), rhs: 1.5}));
+assert.commandWorked(coll.insert({_id: 3, lhs: new Date(1683794065002), rhs: 1.7}));
+// Decimals are rounded to int64 (ties round to even) when added to a Date.
+assert.commandWorked(
+ coll.insert({_id: 4, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.4")}));
+assert.commandWorked(
+ coll.insert({_id: 5, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.5")}));
+assert.commandWorked(
+ coll.insert({_id: 6, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.7")}));
+assert.commandWorked(
+ coll.insert({_id: 7, lhs: new Date(1683794065002), rhs: new NumberDecimal("2.5")}));
+
+let result1 =
+ coll.aggregate([{$project: {sum: {$add: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}]).toArray();
+assert.eq(result1[0].sum, new Date(1683794065003));
+assert.eq(result1[1].sum, new Date(1683794065003));
+assert.eq(result1[2].sum, new Date(1683794065004));
+assert.eq(result1[3].sum, new Date(1683794065004));
+assert.eq(result1[4].sum, new Date(1683794065003));
+assert.eq(result1[5].sum, new Date(1683794065004));
+assert.eq(result1[6].sum, new Date(1683794065004));
+assert.eq(result1[7].sum, new Date(1683794065004));
+
+coll.drop();
+
+assert.commandWorked(coll.insert([{
+ _id: 0,
+ veryBigPositiveLong: NumberLong("9223372036854775806"),
+ veryBigPositiveDouble: 9223372036854775806,
+ veryBigPositiveDecimal: NumberDecimal("9223372036854775806")
+}]));
+
+let pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveLong"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
+
+pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveDouble"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
+
+pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveDecimal"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
}());
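The assertions above pin down two different tie-breaking rules when a number is added to a Date: the double cases round 0.5 up to 1 ms, while the decimal cases round the 2.5 tie down to the even value, 2 ms. A minimal standalone shell sketch of the same behavior (illustrative collection name, not part of the patch):

// Per the assertions above: double 0.5 adds 1 ms, decimal 2.5 adds 2 ms (tie to even).
db.add_rounding_example.drop();
db.add_rounding_example.insertOne(
    {d: new Date(1683794065002), dbl: 0.5, dec: NumberDecimal("2.5")});
const out = db.add_rounding_example
                .aggregate([{
                    $project: {
                        viaDouble: {$add: ["$d", "$dbl"]},
                        viaDecimal: {$add: ["$d", "$dec"]}
                    }
                }])
                .toArray()[0];
assert.eq(out.viaDouble, new Date(1683794065003));
assert.eq(out.viaDecimal, new Date(1683794065004));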
diff --git a/jstests/aggregation/expressions/arith_overflow.js b/jstests/aggregation/expressions/arith_overflow.js
new file mode 100644
index 0000000000000..a391224ff8469
--- /dev/null
+++ b/jstests/aggregation/expressions/arith_overflow.js
@@ -0,0 +1,31 @@
+// Tests for $add, $subtract and $multiply aggregation expression type promotion on overflow
+// @tags: [requires_fcv_71]
+(function() {
+"use strict";
+
+const coll = db.arith_overflow;
+
+function runTest(operator, expectedResults) {
+ const result =
+ coll.aggregate([{$project: {res: {[operator]: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}])
+ .toArray()
+ .map(r => r.res);
+ assert.eq(result, expectedResults);
+}
+
+// $add
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, lhs: NumberInt(2e+9), rhs: NumberInt(2e+9)}));
+assert.commandWorked(coll.insert({_id: 1, lhs: NumberLong(9e+18), rhs: NumberLong(9e+18)}));
+
+runTest("$add", [NumberLong(4e+9), 1.8e+19]);
+
+// $subtract
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, lhs: NumberInt(2e+9), rhs: NumberInt(-2e+9)}));
+assert.commandWorked(coll.insert({_id: 1, lhs: NumberLong(9e+18), rhs: NumberLong(-9e+18)}));
+
+runTest("$subtract", [NumberLong(4e+9), 1.8e+19]);
+// $multiply uses the same arguments as $subtract
+runTest("$multiply", [NumberLong(-4e+18), -8.1e+37]);
+}());
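A compact way to read the expected arrays in runTest: when a 32-bit int sum overflows it is promoted to NumberLong, and when a long sum overflows it is promoted to double. A standalone sketch using the same operands as the $add case (illustrative collection name, not part of the patch):

db.overflow_promotion_example.drop();
db.overflow_promotion_example.insertOne(
    {i: NumberInt(2000000000), l: NumberLong("9000000000000000000")});
const r = db.overflow_promotion_example
              .aggregate([{$project: {intSum: {$add: ["$i", "$i"]}, longSum: {$add: ["$l", "$l"]}}}])
              .toArray()[0];
assert.eq(r.intSum, NumberLong("4000000000"));  // int32 overflow -> promoted to long
assert.eq(r.longSum, 1.8e+19);                  // int64 overflow -> promoted to double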
diff --git a/jstests/aggregation/expressions/array_expression.js b/jstests/aggregation/expressions/array_expression.js
new file mode 100644
index 0000000000000..8eb9c06c0e274
--- /dev/null
+++ b/jstests/aggregation/expressions/array_expression.js
@@ -0,0 +1,33 @@
+// Tests for $array expression.
+(function() {
+"use strict";
+
+let coll = db.array_expr;
+coll.drop();
+
+function assertArray(expArray, ...inputs) {
+ assert(coll.drop());
+ if (inputs.length == 0) {
+ assert.commandWorked(coll.insert({}));
+ } else if (inputs.length == 1) {
+ assert.commandWorked(coll.insert({a: inputs[0]}));
+ } else {
+ assert.commandWorked(coll.insert({a: inputs[0], b: inputs[1]}));
+ }
+ const result = coll.aggregate([{$project: {out: ["$a", "$b"]}}]).toArray()[0].out;
+ assert.eq(result, expArray);
+}
+
+assertArray([1, 2], 1, 2);
+assertArray([null, null], null, null);
+assertArray(["TestInput", null], "TestInput", null);
+assertArray([{a: 1, b: 2}, [1, 2]], {a: 1, b: 2}, [1, 2]);
+assertArray(["TestInput", null], "TestInput");
+assertArray([null, null]);
+
+// no arg
+assert(coll.drop());
+assert.commandWorked(coll.insert({}));
+let result = coll.aggregate([{$project: {out: []}}]).toArray()[0].out;
+assert.eq(result, []);
+}());
diff --git a/jstests/aggregation/expressions/collation_optimize_fetch.js b/jstests/aggregation/expressions/collation_optimize_fetch.js
new file mode 100644
index 0000000000000..4ce827aca686d
--- /dev/null
+++ b/jstests/aggregation/expressions/collation_optimize_fetch.js
@@ -0,0 +1,95 @@
+/**
+ * The combination of collation, index scan, sorting, and fetching needs close consideration to
+ * ensure an optimal ordering of the operations. If the query collation matches the index
+ * collation, the index can satisfy grouping, sorting, and limiting before the fetch, since those
+ * operations can work on ICU-encoded index keys; only the fetch must return non-ICU-encoded
+ * values. This test suite checks the number of documents fetched from the collated collection in
+ * combination with a limit operator. This optimization was added with SERVER-63132.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * assumes_no_implicit_collection_creation_after_drop,
+ * ]
+ */
+
+(function() {
+"use strict";
+
+var results;
+const caseInsensitiveUS = {
+ locale: "en",
+ strength: 2
+};
+const caseInsensitiveDE = {
+ locale: "de_AT",
+ strength: 2
+};
+const documents = [
+ {_id: 0, a: 'A', b: 'B', c: 'A', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 1, a: 'a', b: 'B', c: 'b', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 2, a: 'A', b: 'B', c: 'C', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 3, a: 'a', b: 'B', c: 'D', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 4, a: 'A', b: 'B', c: 'e', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 5, a: 'a', b: 'b', c: 'F', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 6, a: 'A', b: 'b', c: 'g', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 7, a: 'a', b: 'b', c: 'H', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 8, a: 'A', b: 'b', c: 'I', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+ {_id: 9, a: 'a', b: 'b', c: 'j', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}},
+];
+const indexes = [{a: 1, b: 1, c: 1}, {a: 1, d: 1}, {"e.a": 1, "e.b": 1}];
+
+function initCollection(collectionCollation, indexCollation) {
+ db.collation_optimize_fetch.drop();
+
+ // Setup the collection.
+ assert.commandWorked(db.createCollection(
+ "collation_optimize_fetch", collectionCollation ? {collation: collectionCollation} : ""));
+
+ // Setup the indexes.
+ indexes.forEach(idx => (assert.commandWorked(db.collation_optimize_fetch.createIndex(
+ idx, indexCollation ? {collation: indexCollation} : ""))));
+
+ // Insert docs.
+ assert.commandWorked(db.collation_optimize_fetch.insert(documents));
+}
+
+function runTest(expectedDocumentCount) {
+ // Run the tests with the provided indexes.
+ assert.eq(expectedDocumentCount,
+ db.collation_optimize_fetch.explain("executionStats")
+ .find({a: 'a'})
+ .sort({c: 1})
+ .limit(5)
+ .next()
+ .executionStats.totalDocsExamined);
+ assert.eq(expectedDocumentCount,
+ db.collation_optimize_fetch.explain("executionStats")
+ .find({a: 'a'})
+ .sort({d: 1})
+ .limit(5)
+ .next()
+ .executionStats.totalDocsExamined);
+ assert.eq(expectedDocumentCount,
+ db.collation_optimize_fetch.explain("executionStats")
+ .find({"e.a": 'ae'})
+ .sort({"e.b": 1})
+ .limit(5)
+ .next()
+ .executionStats.totalDocsExamined);
+}
+
+// Only 5 documents should be fetched as the sort and limit can be satisfied by the IDX.
+initCollection(caseInsensitiveUS);
+runTest(5);
+
+// 10 documents need to be fetched as the IDX has a different collation than the query.
+initCollection(null, caseInsensitiveUS);
+runTest(10);
+
+// Different collations on the index and the collection require fetching all 10 documents.
+initCollection(caseInsensitiveDE, caseInsensitiveUS);
+runTest(10);
+
+// Cleanup.
+db.collation_optimize_fetch.drop();
+})();
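To see the optimization this suite measures, it can help to look at executionStats directly: when the index collation matches the query collation, the sort and limit run on ICU-encoded index keys and only the limited documents reach FETCH. A hedged sketch under that assumption (illustrative collection name and documents, not part of the patch):

db.collation_fetch_example.drop();
db.createCollection("collation_fetch_example", {collation: {locale: "en", strength: 2}});
db.collation_fetch_example.createIndex({a: 1, b: 1, c: 1});
for (let i = 0; i < 10; ++i) {
    assert.commandWorked(db.collation_fetch_example.insert({a: 'a', b: 'b', c: 'c' + i}));
}
const stats = db.collation_fetch_example.explain("executionStats")
                  .find({a: 'a'})
                  .sort({c: 1})
                  .limit(5)
                  .next()
                  .executionStats;
// With matching collations only the 5 limited documents should be fetched.
assert.eq(5, stats.totalDocsExamined);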
diff --git a/jstests/aggregation/expressions/concat_arrays.js b/jstests/aggregation/expressions/concat_arrays.js
index 3c52f31cd08c9..02648550eda03 100644
--- a/jstests/aggregation/expressions/concat_arrays.js
+++ b/jstests/aggregation/expressions/concat_arrays.js
@@ -11,12 +11,9 @@
// # tests from implicit index creation suites.
// assumes_no_implicit_index_creation,
// ]
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.projection_expr_concat_arrays;
coll.drop();
@@ -174,5 +171,4 @@ runAndAssert(["$arr1", [1, 2, 3], "$arr2"], [
[1, 2, 3],
null,
null
-]);
-}());
+]);
\ No newline at end of file
diff --git a/jstests/aggregation/expressions/date_to_string.js b/jstests/aggregation/expressions/date_to_string.js
index 5ee2873288227..3aa293287392b 100644
--- a/jstests/aggregation/expressions/date_to_string.js
+++ b/jstests/aggregation/expressions/date_to_string.js
@@ -189,7 +189,11 @@ assert.eq(
.toArray());
/* --------------------------------------------------------------------------------------- */
-/* Test that the default format is "%Y-%m-%dT%H:%M:%S.%LZ" if none specified. */
+/* Test that the default format is:
+ *   "%Y-%m-%dT%H:%M:%S.%LZ" if no timezone is specified or UTC is explicitly specified, and
+ *   "%Y-%m-%dT%H:%M:%S.%L" if a non-UTC timezone is explicitly specified.
+ * The last case also verifies the Daylight Saving Time offset versus UTC.
+ */
coll.drop();
assert.commandWorked(coll.insert([
@@ -198,11 +202,35 @@ assert.commandWorked(coll.insert([
{_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")},
]));
+// No timezone specified. Defaults to UTC time, and the format includes the 'Z' (UTC) suffix.
assert.eq(
[
- {_id: 0, date: "2017-01-04T10:08:51.911Z"},
- {_id: 1, date: "2017-07-04T11:09:12.911Z"},
- {_id: 2, date: "2017-12-04T10:09:14.911Z"},
+ {_id: 0, date: "2017-01-04T15:08:51.911Z"},
+ {_id: 1, date: "2017-07-04T15:09:12.911Z"},
+ {_id: 2, date: "2017-12-04T15:09:14.911Z"},
+ ],
+ coll.aggregate([{$project: {date: {$dateToString: {date: "$date"}}}}, {$sort: {_id: 1}}])
+ .toArray());
+
+// UTC timezone explicitly specified. Gives UTC time, and the format includes the 'Z' (UTC) suffix.
+assert.eq(
+ [
+ {_id: 0, date: "2017-01-04T15:08:51.911Z"},
+ {_id: 1, date: "2017-07-04T15:09:12.911Z"},
+ {_id: 2, date: "2017-12-04T15:09:14.911Z"},
+ ],
+ coll.aggregate([
+ {$project: {date: {$dateToString: {date: "$date", timezone: "UTC"}}}},
+ {$sort: {_id: 1}}
+ ])
+ .toArray());
+
+// Non-UTC timezone explicitly specified. Gives the requested time, and the format omits 'Z'.
+assert.eq(
+ [
+ {_id: 0, date: "2017-01-04T10:08:51.911"},
+ {_id: 1, date: "2017-07-04T11:09:12.911"},
+ {_id: 2, date: "2017-12-04T10:09:14.911"},
],
coll.aggregate([
{$project: {date: {$dateToString: {date: "$date", timezone: "America/New_York"}}}},
diff --git a/jstests/aggregation/expressions/median_expression_approx.js b/jstests/aggregation/expressions/median_expression_approx.js
index dd05a9106ba09..92964e0754b29 100644
--- a/jstests/aggregation/expressions/median_expression_approx.js
+++ b/jstests/aggregation/expressions/median_expression_approx.js
@@ -3,7 +3,6 @@
* with the field 'p':[0.5].
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
diff --git a/jstests/aggregation/expressions/multiply.js b/jstests/aggregation/expressions/multiply.js
index da4c612ff7771..101a0e365a08b 100644
--- a/jstests/aggregation/expressions/multiply.js
+++ b/jstests/aggregation/expressions/multiply.js
@@ -55,6 +55,7 @@ const binaryTestCases = [
},
{document: {left: NumberDecimal("12.5"), right: null}, expected: null},
+ // Test null
{document: {left: null, right: NumberInt(2)}, expected: null},
{document: {left: null, right: 2.55}, expected: null},
{document: {left: null, right: NumberLong("2")}, expected: null},
diff --git a/jstests/aggregation/expressions/n_expressions.js b/jstests/aggregation/expressions/n_expressions.js
index 9798488900e13..10213dc9683fa 100644
--- a/jstests/aggregation/expressions/n_expressions.js
+++ b/jstests/aggregation/expressions/n_expressions.js
@@ -56,4 +56,4 @@ testExpr({$minN: args}, [3, 4]);
testExpr({$maxN: args}, [5, 4]);
testExpr({$firstN: args}, [3, 4]);
testExpr({$lastN: args}, [4, 5]);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/expressions/percentile_expression_approx.js b/jstests/aggregation/expressions/percentile_expression_approx.js
index 34673489fdca4..498e864a31541 100644
--- a/jstests/aggregation/expressions/percentile_expression_approx.js
+++ b/jstests/aggregation/expressions/percentile_expression_approx.js
@@ -2,7 +2,6 @@
* Tests for the approximate percentile expression semantics.
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
@@ -12,10 +11,10 @@ load("jstests/aggregation/extras/utils.js");
const coll = db[jsTestName()];
-function testWithProject({doc, percentileSpec, expectedResult, msg}) {
+function testWithProject({doc, percentileSpec, letSpec, expectedResult, msg}) {
coll.drop();
coll.insert(doc);
- const res = coll.aggregate([{$project: {p: percentileSpec}}]).toArray();
+ const res = coll.aggregate([{$project: {p: percentileSpec}}], {let : letSpec}).toArray();
// For $percentile the result should be ordered to match the spec, so assert exact equality.
assert.eq(expectedResult, res[0].p, msg + ` result: ${tojson(res)}`);
}
@@ -102,6 +101,27 @@ testWithProject({
msg: "Multiple percentiles when single input expression resolves to a non-numeric scalar"
});
+testWithProject({
+ doc: {x: [2, 1], y: 3},
+ percentileSpec: {
+ $percentile: {
+ p: [0.5, 0.9],
+ input: {$concatArrays: ["$x", [{$add: [42, "$y"]}]]},
+ method: "approximate"
+ }
+ },
+ expectedResult: [2, 42 + 3],
+ msg: "Input as complex expression"
+});
+
+testWithProject({
+ doc: {x: [2, 3, 1]},
+ percentileSpec: {$percentile: {p: "$$ps", input: "$x", method: "approximate"}},
+ letSpec: {ps: [0.1, 0.5, 0.9]},
+ expectedResult: [1, 2, 3],
+ msg: "'p' specified as a variable"
+});
+
/**
* 'rand()' generates a uniform distribution from [0.0, 1.0] so we can check accuracy of the result
* in terms of values rather than in terms of rank.
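The new letSpec argument threads through to the aggregate command's top-level 'let' option, which is how 'p' can be supplied as a constant variable rather than a literal array. A standalone sketch mirroring the new test case (illustrative collection name, not part of the patch):

db.percentile_let_example.drop();
db.percentile_let_example.insertOne({x: [2, 3, 1]});
const res =
    db.percentile_let_example
        .aggregate(
            [{$project: {p: {$percentile: {p: "$$ps", input: "$x", method: "approximate"}}}}],
            {let : {ps: [0.1, 0.5, 0.9]}})
        .toArray();
assert.eq(res[0].p, [1, 2, 3]);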
diff --git a/jstests/aggregation/expressions/percentile_expression_syntax.js b/jstests/aggregation/expressions/percentile_expression_syntax.js
index a156b599eec7e..65d112a67e4a4 100644
--- a/jstests/aggregation/expressions/percentile_expression_syntax.js
+++ b/jstests/aggregation/expressions/percentile_expression_syntax.js
@@ -2,7 +2,6 @@
* Tests for the $percentile expression syntax.
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
(function() {
@@ -15,101 +14,220 @@ coll.drop();
assert.commandWorked(coll.insert([{_id: 0, k1: 3, k2: 2, k3: "hi", k4: [1, 2, 3]}]));
-/**
- * Tests to check that invalid $percentile specifications are rejected.
- */
-function assertInvalidSyntax(percentileSpec, msg) {
- assert.commandFailed(
- coll.runCommand("aggregate", {pipeline: [{$project: {p: percentileSpec}}], cursor: {}}),
- msg);
+function assertInvalidSyntax({pSpec, letSpec, msg}) {
+ let command = {pipeline: [{$project: {p: pSpec}}], let : letSpec, cursor: {}};
+ assert.commandFailed(coll.runCommand("aggregate", command), msg);
}
-assertInvalidSyntax({$percentile: 0.5}, "Should fail if $percentile is not an object");
-
-assertInvalidSyntax({$percentile: {input: ["$k1", "$k2"], method: "approximate"}},
- "Should fail if $percentile is missing 'p' field");
-
-assertInvalidSyntax({$percentile: {p: [0.5], method: "approximate"}},
- "Should fail if $percentile is missing 'input' field");
-
-assertInvalidSyntax({$percentile: {p: [0.5], input: "$k1"}},
- "Should fail if $percentile is missing 'method' field");
+function assertValidSyntax({pSpec, letSpec, msg}) {
+ let command = {pipeline: [{$project: {p: pSpec}}], let : letSpec, cursor: {}};
+ assert.commandWorked(coll.runCommand("aggregate", command), msg);
+}
+/**
+ * Test missing or unexpected fields in $percentile spec.
+ */
assertInvalidSyntax(
- {$percentile: {p: [0.5], input: ["$k1", "$k2"], method: "approximate", extras: 42}},
- "Should fail if $percentile contains an unexpected field");
-
-assertInvalidSyntax({$percentile: {p: 0.5, input: ["$k1", "$k2"], method: "approximate"}},
- "Should fail if 'p' field in $percentile isn't array");
+ {pSpec: {$percentile: 0.5}, msg: "Should fail if $percentile is not an object"});
-assertInvalidSyntax({$percentile: {p: [], input: ["$k1", "$k2"], method: "approximate"}},
- "Should fail if 'p' field in $percentile is an empty array");
+assertInvalidSyntax({
+ pSpec: {$percentile: {input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "Should fail if $percentile is missing 'p' field"
+});
-assertInvalidSyntax({$percentile: {p: [0.5], input: [], method: "approximate"}},
- "Should fail if 'input' field in $percentile is an empty array");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], method: "approximate"}},
+ msg: "Should fail if $percentile is missing 'input' field"
+});
-assertInvalidSyntax(
- {$percentile: {p: [0.5, "foo"], input: ["$k1", "$k2"], method: "approximate"}},
- "Should fail if 'p' field in $percentile is an array with a non-numeric element");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: "$k1"}},
+ msg: "Should fail if $percentile is missing 'method' field"
+});
-assertInvalidSyntax(
- {$percentile: {p: [0.5, 10], input: ["$k1", "$k2"], method: "approximate"}},
- "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range");
-
-assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: 42}},
- "Should fail if 'method' field isn't a string");
-
-assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "fancy"}},
- "Should fail if 'method' isn't one of _predefined_ strings");
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: ["$k1", "$k2"], method: "approximate", extras: 42}},
+ msg: "Should fail if $percentile contains an unexpected field"
+});
/**
- * Tests for $median. $median desugars to $percentile with the field p:[0.5] added, and therefore
- * has similar syntax to $percentile.
+ * Test invalid 'p' field, specified as a constant.
*/
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: 0.5, input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile isn't array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [], input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile is an empty array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, "foo"], input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "Should fail if 'p' field in $percentile is an array with a non-numeric element"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 10], input: ["$k1", "$k2"], method: "approximate"}},
+ msg:
+ "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range"
+});
-assertInvalidSyntax({$median: {p: [0.5], input: "$k4", method: "approximate"}},
- "Should fail if 'p' is defined");
-
-assertInvalidSyntax({$median: {method: "approximate"}},
- "Should fail if $median is missing 'input' field");
-
-assertInvalidSyntax({$median: {input: [], method: "approximate"}},
- "Should fail if $median has an empty array as its 'input' field");
+/**
+ * Test invalid 'p' field, specified as an expression.
+ */
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: ["$x"], input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "'p' should not accept non-const expressions"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: {$add: [0.1, 0.5]}, input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "'p' should not accept expressions that evaluate to a non-array"
+});
+
+assertInvalidSyntax({
+ pSpec: {
+ $percentile: {
+ p: {$concatArrays: [[0.01, 0.1], ["foo"]]},
+ input: ["$k1", "$k2"],
+ method: "approximate"
+ }
+ },
+ msg: "'p' should not accept expressions that evaluate to an array with non-numeric elements"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: "$$pvals", input: ["$k1", "$k2"], method: "approximate"}},
+ letSpec: {pvals: 0.5},
+ msg: "'p' should not accept variables that evaluate to a non-array"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: "$$pvals", input: ["$k1", "$k2"], method: "approximate"}},
+ letSpec: {pvals: [0.5, "foo"]},
+ msg: "'p' should not accept variables that evaluate to an array with non-numeric elements"
+});
-assertInvalidSyntax({$median: {input: ["$k1", "$k2"]}},
- "Should fail if $median is missing 'method' field");
+/**
+ * Test invalid 'method' field.
+ */
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: 42}},
+ msg: "$percentile should fail if 'method' field isn't a string"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "fancy"}},
+ msg: "$percentile should fail if 'method' isn't one of the _predefined_ strings"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "continuous"}},
+ msg: "$percentile should fail because continuous 'method' isn't supported yet"
+});
+
+assertInvalidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "discrete"}},
+ msg: "$percentile should fail because discrete 'method' isn't supported yet"
+});
-assertInvalidSyntax({$median: {input: "$x", method: "approximate", extras: 42}},
- "Should fail if $median contains an unexpected field");
+/**
+ * Tests for $median.
+ */
+assertInvalidSyntax({
+ pSpec: {$median: {p: [0.5], input: "$k4", method: "approximate"}},
+ msg: "Should fail if 'p' is defined"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {method: "approximate"}},
+ msg: "Should fail if $median is missing 'input' field"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: ["$k1", "$k2"]}},
+ msg: "Should fail if $median is missing 'method' field"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: "$x", method: "approximate", extras: 42}},
+ msg: "Should fail if $median contains an unexpected field"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: ["$k1", "$k2"], method: "fancy"}},
+ msg: "$median should fail if 'method' isn't one of the _predefined_ strings"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: ["$k1", "$k2"], method: "continuous"}},
+ msg: "$median should fail because continuous 'method' isn't supported yet"
+});
+
+assertInvalidSyntax({
+ pSpec: {$median: {input: ["$k1", "$k2"], method: "discrete"}},
+ msg: "$median should fail because discrete 'method' isn't supported yet"
+});
/**
* Test that valid $percentile specifications are accepted. The results, i.e. semantics, are
* tested elsewhere and would cover all of the cases below, we are providing them here
* nonetheless for completeness.
*/
-function assertValidSyntax(percentileSpec, msg) {
- assert.commandWorked(
- coll.runCommand("aggregate", {pipeline: [{$project: {p: percentileSpec}}], cursor: {}}),
- msg);
-}
-
-assertValidSyntax(
- {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: ["$k1"], method: "approximate"}},
- "Should be able to specify an array of percentiles");
-
-assertValidSyntax({$percentile: {p: [0.5, 0.9], input: ["k3"], method: "approximate"}},
- "Non-numeric expressions in input array should be gracefully ignored");
-
-assertValidSyntax({$percentile: {p: [0.5], input: "$k4", method: "approximate"}},
- "Should work if 'input' field in $percentile is a single expression");
+assertValidSyntax({
+ pSpec: {
+ $percentile:
+ {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: ["$k1", "$k2"], method: "approximate"}
+ },
+ msg: "Should be able to specify an array of percentiles"
+});
/**
- * Tests for $median. $median desugars to $percentile with the field p:[0.5] added.
+ * Test valid 'input' fields (even ones that don't make sense).
*/
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.9], input: "something", method: "approximate"}},
+ msg: "Non-array 'input' field should be gracefully ignored"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: [], method: "approximate"}},
+ msg: "Empty array in the 'input' should be ignored"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5, 0.9], input: ["k3"], method: "approximate"}},
+ msg: "Non-numeric expressions in the 'input' array should be gracefully ignored"
+});
+
+assertValidSyntax({
+ pSpec: {$percentile: {p: [0.5], input: "$k4", method: "approximate"}},
+ msg: "Should work if 'input' field in $percentile is a simple expression"
+});
+
+assertValidSyntax({
+ pSpec: {
+ $percentile: {
+ p: [0.5],
+ input: {$concatArrays: [["$k1", "$k2"], [{$add: [2, "$k1"]}], "$k4"]},
+ method: "approximate"
+ }
+ },
+ msg: "Should work if 'input' field in $percentile is a complex expression"
+});
-assertValidSyntax({$median: {input: "$k4", method: "approximate"}},
- "Simple base case for $median with single expression input field");
-
-assertValidSyntax({$median: {input: ["$k1", "$k2"], method: "approximate"}},
- "Simple base case for $median with array input field");
+/**
+ * Tests for $median.
+ */
+assertValidSyntax({
+ pSpec: {$median: {input: "$k4", method: "approximate"}},
+ msg: "Simple base case for $median with single expression input field"
+});
+
+assertValidSyntax({
+ pSpec: {$median: {input: ["$k1", "$k2"], method: "approximate"}},
+ msg: "Simple base case for $median with array input field"
+});
})();
diff --git a/jstests/aggregation/expressions/rand.js b/jstests/aggregation/expressions/rand.js
index d4d3559bc1adf..a5086b61ea756 100644
--- a/jstests/aggregation/expressions/rand.js
+++ b/jstests/aggregation/expressions/rand.js
@@ -1,11 +1,6 @@
/**
* Test the $rand expression.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
-
const coll = db.expression_rand;
coll.drop();
@@ -28,5 +23,4 @@ print("Average: ", avg);
// Test certainty within 10 standard deviations.
const err = 10.0 / Math.sqrt(12.0 * N);
assert.lte(0.5 - err, avg);
-assert.gte(0.5 + err, avg);
-}());
+assert.gte(0.5 + err, avg);
\ No newline at end of file
diff --git a/jstests/aggregation/expressions/subtract.js b/jstests/aggregation/expressions/subtract.js
index 91e46c08b72ce..1be9f731cbbfd 100644
--- a/jstests/aggregation/expressions/subtract.js
+++ b/jstests/aggregation/expressions/subtract.js
@@ -1,7 +1,11 @@
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains.
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
+
+// Tests for $subtract aggregation expression
(function() {
"use strict";
-const coll = db.add_coll;
+const coll = db.subtract_coll;
coll.drop();
assert.commandWorked(coll.insert({_id: 0, lhs: 1, rhs: 1}));
@@ -14,8 +18,20 @@ assert.commandWorked(
assert.commandWorked(coll.insert({_id: 5, lhs: new Date(1912392670000), rhs: 70000}));
assert.commandWorked(
coll.insert({_id: 6, lhs: new Date(1912392670000), rhs: new Date(1912392600000)}));
-assert.commandWorked(coll.insert(
- {_id: 7, lhs: NumberLong("9000000000000000000"), rhs: NumberLong("-9000000000000000000")}));
+// Doubles are rounded to int64 when subtracted from a Date.
+assert.commandWorked(coll.insert({_id: 7, lhs: new Date(1683794065002), rhs: 0.5}));
+assert.commandWorked(coll.insert({_id: 8, lhs: new Date(1683794065002), rhs: 1.4}));
+assert.commandWorked(coll.insert({_id: 9, lhs: new Date(1683794065002), rhs: 1.5}));
+assert.commandWorked(coll.insert({_id: 10, lhs: new Date(1683794065002), rhs: 1.7}));
+// Decimals are rounded to int64 (ties round to even) when subtracted from a Date.
+assert.commandWorked(
+ coll.insert({_id: 11, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.4")}));
+assert.commandWorked(
+ coll.insert({_id: 12, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.5")}));
+assert.commandWorked(
+ coll.insert({_id: 13, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.7")}));
+assert.commandWorked(
+ coll.insert({_id: 14, lhs: new Date(1683794065002), rhs: new NumberDecimal("2.5")}));
const result =
coll.aggregate([{$project: {diff: {$subtract: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}])
@@ -27,8 +43,39 @@ assert.eq(result[3].diff, 10.0);
assert.eq(result[4].diff, NumberDecimal("9990.00005"));
assert.eq(result[5].diff, new Date(1912392600000));
assert.eq(result[6].diff, 70000);
+assert.eq(result[7].diff, new Date(1683794065001));
+assert.eq(result[8].diff, new Date(1683794065001));
+assert.eq(result[9].diff, new Date(1683794065000));
+assert.eq(result[10].diff, new Date(1683794065000));
+assert.eq(result[11].diff, new Date(1683794065001));
+assert.eq(result[12].diff, new Date(1683794065000));
+assert.eq(result[13].diff, new Date(1683794065000));
+assert.eq(result[14].diff, new Date(1683794065000));
+
+// The following cases report a date overflow error.
+coll.drop();
+
+assert.commandWorked(coll.insert([{
+ _id: 0,
+ veryBigNegativeLong: NumberLong("-9223372036854775808"),
+ veryBigNegativeDouble: -9223372036854775808,
+ veryBigNegativeDecimal: NumberDecimal("-9223372036854775808"),
+ doubleNaN: NaN,
+ decimalNaN: NumberDecimal("NaN"),
+}]));
+
+let pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeLong"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
+
+pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeDouble"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
+
+pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeDecimal"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
+
+pipeline = [{$project: {res: {$subtract: [new Date(-1), "$doubleNaN"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
-// TODO WRITING-10039 After type promotion algorithm is fixed, we need to use more strict assert
-// to check type promotion
-assert.eq(bsonWoCompare(result[7].diff, 1.8e+19), 0);
+pipeline = [{$project: {res: {$subtract: [new Date(-1), "$decimalNaN"]}}}];
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow");
}());
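The overflow block above also shows that Date arithmetic never wraps silently: when the result of the subtraction cannot be represented as a Date, the server fails the aggregate with ErrorCodes.Overflow and a "date overflow" message. A minimal sketch of catching that error directly (illustrative collection name; assertErrCodeAndErrMsgContains from extras/utils.js wraps the same kind of check):

db.date_overflow_example.drop();
db.date_overflow_example.insertOne({big: NumberLong("-9223372036854775808")});
const err = assert.throws(
    () => db.date_overflow_example
              .aggregate([{$project: {res: {$subtract: [new Date(10), "$big"]}}}])
              .toArray());
assert.eq(err.code, ErrorCodes.Overflow);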
diff --git a/jstests/aggregation/expressions/trim.js b/jstests/aggregation/expressions/trim.js
index 821e15ea4a38f..472c37c15e578 100644
--- a/jstests/aggregation/expressions/trim.js
+++ b/jstests/aggregation/expressions/trim.js
@@ -5,6 +5,7 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode, testExpression and
// testExpressionWithCollation.
+load("jstests/libs/sbe_assert_error_override.js");
const coll = db.trim_expressions;
@@ -81,10 +82,48 @@ assert.eq(
{_id: 4, proof: null},
]);
-// Test that errors are reported correctly.
-assertErrorCode(coll, [{$project: {x: {$trim: " x "}}}], 50696);
-assertErrorCode(coll, [{$project: {x: {$trim: {input: 4}}}}], 50699);
-assertErrorCode(coll, [{$project: {x: {$trim: {input: {$add: [4, 2]}}}}}], 50699);
-assertErrorCode(coll, [{$project: {x: {$trim: {input: "$_id"}}}}], 50699);
-assertErrorCode(coll, [{$project: {x: {$trim: {input: " x ", chars: "$_id"}}}}], 50700);
+// Semantically the same as the tests above, but with a non-constant input for 'chars'.
+coll.drop();
+assert.commandWorked(coll.insert([
+ {_id: 0, proof: "Left as an exercise for the reader∎", extra: "∎"},
+ {_id: 1, proof: "∎∃ proof∎", extra: "∎"},
+ {
+ _id: 2,
+ proof: "Just view the problem as a continuous DAG whose elements are taylor series∎",
+ extra: "∎"
+ },
+ {_id: 3, proof: null},
+ {_id: 4},
+]));
+assert.eq(
+ coll.aggregate(
+ [{$sort: {_id: 1}}, {$project: {proof: {$rtrim: {input: "$proof", chars: "$extra"}}}}])
+ .toArray(),
+ [
+ {_id: 0, proof: "Left as an exercise for the reader"},
+ {_id: 1, proof: "∎∃ proof"},
+ {
+ _id: 2,
+ proof: "Just view the problem as a continuous DAG whose elements are taylor series"
+ },
+ {_id: 3, proof: null},
+ {_id: 4, proof: null},
+ ]);
+
+coll.drop();
+assert.commandWorked(coll.insert([
+ {_id: 0, nonObject: " x "},
+ {_id: 1, constantNum: 4},
+]));
+
+// Test that errors are reported correctly (for all of $trim, $ltrim, $rtrim).
+for (const op of ["$trim", "$ltrim", "$rtrim"]) {
+ assertErrorCode(coll, [{$project: {x: {[op]: {}}}}], 50695);
+ assertErrorCode(coll, [{$project: {x: {[op]: "$nonObject"}}}], 50696);
+ assertErrorCode(coll, [{$project: {x: {[op]: {input: "$constantNum"}}}}], 50699);
+ assertErrorCode(
+ coll, [{$project: {x: {[op]: {input: {$add: ["$constantNum", "$constantNum"]}}}}}], 50699);
+ assertErrorCode(coll, [{$project: {x: {[op]: {input: "$_id"}}}}], 50699);
+ assertErrorCode(coll, [{$project: {x: {[op]: {input: "$nonObject", chars: "$_id"}}}}], 50700);
+}
}());
diff --git a/jstests/aggregation/expressions/unary_numeric.js b/jstests/aggregation/expressions/unary_numeric.js
index 4c01bbdc750e4..90527cc4a54dc 100644
--- a/jstests/aggregation/expressions/unary_numeric.js
+++ b/jstests/aggregation/expressions/unary_numeric.js
@@ -315,4 +315,4 @@ assertErrorCode(coll, [{$project: {a: {$exp: "$x"}}}], 28765);
assertErrorCode(coll, [{$project: {a: {$log10: "$x"}}}], 28765);
assertErrorCode(coll, [{$project: {a: {$ln: "$x"}}}], 28765);
assertErrorCode(coll, [{$project: {a: {$sqrt: "$x"}}}], 28765);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/aggregation/extras/merge_helpers.js b/jstests/aggregation/extras/merge_helpers.js
index 0475f3d3d1be0..4ef88541ab6b3 100644
--- a/jstests/aggregation/extras/merge_helpers.js
+++ b/jstests/aggregation/extras/merge_helpers.js
@@ -86,4 +86,4 @@ function assertMergeSucceedsWithExpectedUniqueIndex(
// recreation in the sharded collections passthrough suites.
function dropWithoutImplicitRecreate(collName) {
db.runCommand({drop: collName});
-}
\ No newline at end of file
+}
diff --git a/jstests/aggregation/extras/window_function_helpers.js b/jstests/aggregation/extras/window_function_helpers.js
index 49e2af29c4a60..f30b5c0c1e955 100644
--- a/jstests/aggregation/extras/window_function_helpers.js
+++ b/jstests/aggregation/extras/window_function_helpers.js
@@ -1,10 +1,10 @@
load("jstests/aggregation/extras/utils.js"); // arrayEq
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
/**
* Create a collection of tickers and prices.
*/
-function seedWithTickerData(coll, docsPerTicker) {
+export function seedWithTickerData(coll, docsPerTicker) {
for (let i = 0; i < docsPerTicker; i++) {
assert.commandWorked(
coll.insert({_id: i, partIndex: i, ticker: "T1", price: (500 - i * 10)}));
@@ -14,13 +14,13 @@ function seedWithTickerData(coll, docsPerTicker) {
}
}
-function forEachPartitionCase(callback) {
+export function forEachPartitionCase(callback) {
callback(null);
callback("$ticker");
callback({$toLower: "$ticker"});
}
-const documentBounds = [
+export const documentBounds = [
["unbounded", 0],
["unbounded", -1],
["unbounded", 1],
@@ -39,7 +39,7 @@ const documentBounds = [
[-2, 3],
];
-function forEachDocumentBoundsCombo(callback) {
+export function forEachDocumentBoundsCombo(callback) {
documentBounds.forEach(function(bounds, index) {
let boundsCombo = [bounds];
for (let j = index + 1; j < documentBounds.length; j++) {
@@ -74,7 +74,7 @@ function forEachDocumentBoundsCombo(callback) {
* Note that this function assumes that the data in 'coll' has been seeded with the documents from
* the seedWithTickerData() method above.
*/
-function computeAsGroup({
+export function computeAsGroup({
coll,
partitionKey,
accumSpec,
@@ -105,7 +105,7 @@ function computeAsGroup({
/**
* Helper to calculate the correct skip based on the lowerBound given.
*/
-function calculateSkip(lowerBound, indexInPartition) {
+export function calculateSkip(lowerBound, indexInPartition) {
let skipValueToUse = 0;
if (lowerBound === "current") {
skipValueToUse = indexInPartition;
@@ -123,7 +123,7 @@ function calculateSkip(lowerBound, indexInPartition) {
/**
* Helper to calculate the correct limit based on the bounds given.
*/
-function calculateLimit(lowerBound, upperBound, indexInPartition) {
+export function calculateLimit(lowerBound, upperBound, indexInPartition) {
let limitValueToUse = "unbounded";
if (upperBound === "current") {
if (lowerBound === "unbounded") {
@@ -160,7 +160,7 @@ function calculateLimit(lowerBound, upperBound, indexInPartition) {
return limitValueToUse;
}
-function assertResultsEqual(wfRes, index, groupRes, accum) {
+export function assertResultsEqual(wfRes, index, groupRes, accum) {
// On DEBUG builds, the computed $group may be slightly different due to precision
// loss when spilling to disk.
// TODO SERVER-42616: Enable the exact check for $stdDevPop/Samp.
@@ -180,7 +180,7 @@ function assertResultsEqual(wfRes, index, groupRes, accum) {
"Window function result for index " + index + ": " + tojson(wfRes));
}
-function assertExplainResult(explainResult) {
+export function assertExplainResult(explainResult) {
const stages = getAggPlanStages(explainResult, "$_internalSetWindowFields");
for (let stage of stages) {
assert(stage.hasOwnProperty("$_internalSetWindowFields"), stage);
@@ -209,7 +209,7 @@ function assertExplainResult(explainResult) {
* Note that this function assumes that the documents in 'coll' were initialized using the
* seedWithTickerData() method above.
*/
-function testAccumAgainstGroup(coll, accum, onNoResults = null, accumArgs = "$price") {
+export function testAccumAgainstGroup(coll, accum, onNoResults = null, accumArgs = "$price") {
const accumSpec = {[accum]: accumArgs};
forEachPartitionCase(function(partition) {
documentBounds.forEach(function(bounds, index) {
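Because window_function_helpers.js now exports its helpers as an ES module, callers import them instead of using load(), matching the analyze_plan.js imports added elsewhere in this patch. A hedged usage sketch (the $avg comparison and collection name are only illustrative, not part of the patch):

import {
    seedWithTickerData,
    testAccumAgainstGroup
} from "jstests/aggregation/extras/window_function_helpers.js";

const coll = db.window_function_helpers_example;
coll.drop();
seedWithTickerData(coll, 10);
// Compare the $avg window function against the equivalent $group result over each bounds combo.
testAccumAgainstGroup(coll, "$avg");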
diff --git a/jstests/aggregation/group_conversion_to_distinct_scan.js b/jstests/aggregation/group_conversion_to_distinct_scan.js
index 546f8238f44ec..240cd94f92e0b 100644
--- a/jstests/aggregation/group_conversion_to_distinct_scan.js
+++ b/jstests/aggregation/group_conversion_to_distinct_scan.js
@@ -18,10 +18,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.group_conversion_to_distinct_scan;
coll.drop();
@@ -1067,5 +1064,4 @@ assertPipelineResultsAndExplain({
],
expectedOutput: expectedResult,
validateExplain: (explain) => assertPlanUsesDistinctScan(explain, {_id: 1})
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/aggregation/large_bson_mid_pipeline.js b/jstests/aggregation/large_bson_mid_pipeline.js
index 0604bff86c27d..13fcbfb2a0d02 100644
--- a/jstests/aggregation/large_bson_mid_pipeline.js
+++ b/jstests/aggregation/large_bson_mid_pipeline.js
@@ -2,11 +2,6 @@
* Tests that extra-large BSON objects (>16MB) can be materialized for the '$match' stage in the
* middle of the query plan without throwing 'BSONObjectTooLarge' exception.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage()'.
-
const testDB = db.getSiblingDB("jsTestName");
assert.commandWorked(testDB.dropDatabase());
@@ -23,5 +18,4 @@ const pipeline = [
{$project: {a: 1}}
];
-assert.doesNotThrow(() => coll.aggregate(pipeline).toArray());
-})();
+assert.doesNotThrow(() => coll.aggregate(pipeline).toArray());
\ No newline at end of file
diff --git a/jstests/aggregation/match_no_swap_rand.js b/jstests/aggregation/match_no_swap_rand.js
index 3206f6fa792e8..3aad6d86c53f3 100644
--- a/jstests/aggregation/match_no_swap_rand.js
+++ b/jstests/aggregation/match_no_swap_rand.js
@@ -10,11 +10,8 @@
* requires_pipeline_optimization,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
function getWinningPlanForPipeline({coll, pipeline}) {
const explain = assert.commandWorked(coll.explain().aggregate(pipeline));
@@ -161,4 +158,3 @@ function assertScanFilterEq({coll, pipeline, filter}) {
});
}
}
-}());
diff --git a/jstests/aggregation/match_swapping_renamed_fields.js b/jstests/aggregation/match_swapping_renamed_fields.js
index 1657282e73522..7adf4aab69d32 100644
--- a/jstests/aggregation/match_swapping_renamed_fields.js
+++ b/jstests/aggregation/match_swapping_renamed_fields.js
@@ -6,10 +6,7 @@
* requires_pipeline_optimization,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js";
let coll = db.match_swapping_renamed_fields;
coll.drop();
@@ -203,5 +200,4 @@ explain = coll.explain().aggregate(pipeline);
// We expect that the $match stage has been split into two, since one predicate has an
// applicable rename that allows swapping, while the other does not.
let matchStages = getAggPlanStages(explain, "$match");
-assert.eq(2, matchStages.length);
-}());
+assert.eq(2, matchStages.length);
\ No newline at end of file
diff --git a/jstests/aggregation/no_output_to_system.js b/jstests/aggregation/no_output_to_system.js
index 2f966e0bea4af..594db28232cf0 100644
--- a/jstests/aggregation/no_output_to_system.js
+++ b/jstests/aggregation/no_output_to_system.js
@@ -50,4 +50,4 @@ if (!FixtureHelpers.isMongos(db)) {
// $out allows for the source collection to be the same as the destination collection.
assertErrorCode(outputToLocal, {$out: outputToLocal.getName()}, 31321);
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js
index 8dfe834018206..f4fde7b8e01ff 100644
--- a/jstests/aggregation/optimize_away_pipeline.js
+++ b/jstests/aggregation/optimize_away_pipeline.js
@@ -14,13 +14,18 @@
// requires_pipeline_optimization,
// requires_profiling,
// ]
-(function() {
-"use strict";
-
load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isWiredTiger.
-load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+
+import {
+ getPlanStages,
+ getAggPlanStage,
+ aggPlanHasStage,
+ planHasStage,
+ isAggregationPlan,
+ isQueryPlan,
+} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.optimize_away_pipeline;
coll.drop();
@@ -799,4 +804,3 @@ if (!FixtureHelpers.isMongos(db) && isWiredTiger(db)) {
[{op: "query", ns: view.getFullName()}, {op: "getmore", ns: view.getFullName()}]);
}
}
-}());
diff --git a/jstests/aggregation/sources/collStats/query_exec_stats.js b/jstests/aggregation/sources/collStats/query_exec_stats.js
index 76c1b46ac7cf1..fa63a65f0c278 100644
--- a/jstests/aggregation/sources/collStats/query_exec_stats.js
+++ b/jstests/aggregation/sources/collStats/query_exec_stats.js
@@ -2,8 +2,6 @@
// @tags: [
// assumes_no_implicit_collection_creation_after_drop,
// does_not_support_repeated_reads,
-// # TODO SERVER-67640: Verify 'top' and $collStats work correctly for queries in CQF.
-// cqf_incompatible,
// ]
(function() {
"use strict";
diff --git a/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js b/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js
index 9ad9ee37bfa0d..63de418760983 100644
--- a/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js
+++ b/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js
@@ -7,12 +7,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
load("jstests/libs/fail_point_util.js"); // For configureFailPoint
@@ -124,5 +119,4 @@ FixtureHelpers.runCommandOnEachPrimary({
}
});
assert.commandWorked(
- db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'}));
-})();
+ db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'}));
\ No newline at end of file
diff --git a/jstests/aggregation/sources/densify/densify_sort_optimization.js b/jstests/aggregation/sources/densify/densify_sort_optimization.js
index ddc18ede260ef..38c8e8c518e56 100644
--- a/jstests/aggregation/sources/densify/densify_sort_optimization.js
+++ b/jstests/aggregation/sources/densify/densify_sort_optimization.js
@@ -7,11 +7,7 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation.
const coll = db[jsTestName()];
@@ -289,5 +285,4 @@ for (let i = 0; i < testCases.length; i++) {
assert(anyEq(result, testCases[i][1]),
"Test case " + i + " failed.\n" +
"Expected:\n" + tojson(testCases[i][1]) + "\nGot:\n" + tojson(result));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/densify/explicit_range.js b/jstests/aggregation/sources/densify/explicit_range.js
index 842ae79d5bfb0..74a92e2b88e40 100644
--- a/jstests/aggregation/sources/densify/explicit_range.js
+++ b/jstests/aggregation/sources/densify/explicit_range.js
@@ -104,4 +104,24 @@ for (let i = 0; i < densifyUnits.length; i++) {
runDensifyRangeTest({step, bounds: [10, 45]});
}
}
+
+// Run a test where there are no documents in the range to ensure we don't generate anything before
+// the range.
+coll.drop();
+let documents = [
+ {"date": ISODate("2022-10-29T23:00:00Z")},
+];
+coll.insert(documents);
+let stage = {
+ field: "date",
+ range: {
+ step: 1,
+ unit: "month",
+ bounds: [
+ ISODate("2022-10-31T23:00:00.000Z"),
+ ISODate("2022-11-30T23:00:00.000Z"),
+ ],
+ },
+};
+testDensifyStage(stage, coll, "Ensure no docs before range");
})();
diff --git a/jstests/aggregation/sources/fill/fill.js b/jstests/aggregation/sources/fill/fill.js
index 332bc7f7e4c68..ff64803d7dcc1 100644
--- a/jstests/aggregation/sources/fill/fill.js
+++ b/jstests/aggregation/sources/fill/fill.js
@@ -6,12 +6,7 @@
* ]
*/
-(function() {
-
-"use strict";
-
load("jstests/libs/fixture_helpers.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
const coll = db[jsTestName()];
@@ -382,5 +377,4 @@ for (let i = 0; i < testCases.length; i++) {
const result = coll.aggregate(testCases[i][0]).toArray();
assertArrayEq(
{actual: result, expected: testCases[i][1], extraErrorMsg: " during testCase " + i});
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/fill/fill_and_densify.js b/jstests/aggregation/sources/fill/fill_and_densify.js
index e374481e5a226..0e9de5d340d36 100644
--- a/jstests/aggregation/sources/fill/fill_and_densify.js
+++ b/jstests/aggregation/sources/fill/fill_and_densify.js
@@ -5,10 +5,7 @@
* ]
*/
-(function() {
-"use strict";
load("jstests/libs/fixture_helpers.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
const coll = db[jsTestName()];
@@ -153,5 +150,4 @@ expected = [
{"part": 2, "val": 8, "toFill": 13, "possible": 6.833333333333334},
{"val": 9, "toFill": 16, "possible": 8, "part": 2}
];
-assertArrayEq({actual: result, expected: expected});
-})();
+assertArrayEq({actual: result, expected: expected});
\ No newline at end of file
diff --git a/jstests/aggregation/sources/fill/fill_parse.js b/jstests/aggregation/sources/fill/fill_parse.js
index ccb7f460b9280..e07abf626cc09 100644
--- a/jstests/aggregation/sources/fill/fill_parse.js
+++ b/jstests/aggregation/sources/fill/fill_parse.js
@@ -7,9 +7,7 @@
* ]
*/
-(function() {
load("jstests/libs/fixture_helpers.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
load("jstests/aggregation/extras/utils.js"); // For anyEq and desugarSingleStageAggregation.
const coll = db[jsTestName()];
@@ -255,5 +253,4 @@ for (let i = 0; i < testCases.length; i++) {
assert(anyEq(result, testCases[i][1], false, null, "UUIDPLACEHOLDER"),
"Test case " + i + " failed.\n" +
"Expected:\n" + tojson(testCases[i][1]) + "\nGot:\n" + tojson(result));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js b/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js
index 2f10862d94d38..bdbfb96373e8f 100644
--- a/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js
+++ b/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js
@@ -6,42 +6,31 @@
// do_not_wrap_aggregations_in_facets,
// requires_pipeline_optimization,
// ]
-(function() {
-"use strict";
-
load('jstests/aggregation/extras/utils.js');
-load("jstests/libs/analyze_plan.js");
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js");
const coll = db.graphlookup_rewrite;
coll.drop();
assert.commandWorked(coll.insertMany([
- {"from": "a", "foo": 1},
- {"from": "b", "to": "a", "foo": 2},
- {"from": "c", "to": "b", "foo": 3},
- {"from": "d", "to": "b", "foo": 4},
- {"from": "e", "to": "c", "foo": 5},
- {"from": "f", "to": "d", "foo": 6}
+ {"_id": 1, "from": "a", "foo": 1},
+ {"_id": 2, "from": "b", "to": "a", "foo": 2},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6}
]));
-const admin = db.getSiblingDB("admin");
-
-const setPipelineOptimizationMode = (mode) => {
- FixtureHelpers.runCommandOnEachPrimary(
- {db: admin, cmdObj: {configureFailPoint: "disablePipelineOptimization", mode}});
-};
-
-// Get initial optimization mode.
-const pipelineOptParameter = assert.commandWorked(
- db.adminCommand({getParameter: 1, "failpoint.disablePipelineOptimization": 1}));
-const oldMode =
- pipelineOptParameter["failpoint.disablePipelineOptimization"].mode ? 'alwaysOn' : 'off';
-
-function assertStagesAndOutput(
- {pipeline = [], expectedStages = [], optimizedAwayStages = [], fieldsToSkip = [], msg = ""}) {
- setPipelineOptimizationMode("off");
-
+function assertStagesAndOutput({
+ pipeline = [],
+ expectedStages = [],
+ optimizedAwayStages = [],
+ expectedOutput = [],
+ orderedArrayComparison = true,
+ fieldsToSkip = [],
+ msg = ""
+}) {
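+    // 'expectedOutput' is compared against the actual results either positionally or as an
+    // unordered set, depending on 'orderedArrayComparison'; pipelines without an explicit $sort
+    // do not guarantee a stable document order.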
const explain = coll.explain().aggregate(pipeline);
const output = coll.aggregate(pipeline).toArray();
@@ -54,10 +43,10 @@ function assertStagesAndOutput(
`${msg}: stage ${stage} not optimized away: ${tojson(explain)}`);
}
- setPipelineOptimizationMode("alwaysOn");
-
- const expectedOutput = coll.aggregate(pipeline).toArray();
- assert(orderedArrayEq(output, expectedOutput, true, fieldsToSkip), msg);
+ const res = orderedArrayComparison
+ ? orderedArrayEq(output, expectedOutput, false, fieldsToSkip)
+ : arrayEq(output, expectedOutput, false, null /*valueComparator*/, fieldsToSkip);
+    assert(res, `actual=${tojson(output)}, expected=${tojson(expectedOutput)}`);
}
const graphLookup = {
@@ -74,6 +63,48 @@ assertStagesAndOutput({
pipeline: [graphLookup, {$sort: {foo: 1}}],
expectedStages: ["SORT", "COLLSCAN", "$graphLookup"],
optimizedAwayStages: ["$sort"],
+ expectedOutput: [
+ {
+ "_id": 1,
+ "from": "a",
+ "foo": 1,
+ "out": [
+ {"_id": 2, "from": "b", "to": "a", "foo": 2},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4}
+ ]
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": [
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4}
+ ]
+ },
+ {
+ "_id": 3,
+ "from": "c",
+ "to": "b",
+ "foo": 3,
+ "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}]
+ },
+ {
+ "_id": 4,
+ "from": "d",
+ "to": "b",
+ "foo": 4,
+ "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}]
+ },
+ {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []}
+ ],
msg: "$graphLookup should swap with $sort if there is no internal $unwind"
});
@@ -81,6 +112,49 @@ assertStagesAndOutput({
pipeline: [graphLookup, {$limit: 100}],
expectedStages: ["LIMIT", "COLLSCAN", "$graphLookup"],
optimizedAwayStages: ["$limit"],
+ orderedArrayComparison: false,
+ expectedOutput: [
+ {
+ "_id": 1,
+ "from": "a",
+ "foo": 1,
+ "out": [
+ {"_id": 2, "from": "b", "to": "a", "foo": 2},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4}
+ ]
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": [
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4}
+ ]
+ },
+ {
+ "_id": 3,
+ "from": "c",
+ "to": "b",
+ "foo": 3,
+ "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}]
+ },
+ {
+ "_id": 4,
+ "from": "d",
+ "to": "b",
+ "foo": 4,
+ "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}]
+ },
+ {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []}
+ ],
msg: "$graphLookup should swap with $limit if there is no internal $unwind"
});
@@ -88,6 +162,7 @@ assertStagesAndOutput({
pipeline: [graphLookup, {$skip: 100}],
expectedStages: ["SKIP", "COLLSCAN", "$graphLookup"],
optimizedAwayStages: ["$skip"],
+ expectedOutput: [],
msg: "$graphLookup should swap with $skip if there is no internal $unwind"
});
@@ -95,23 +170,152 @@ assertStagesAndOutput({
pipeline: [graphLookup, {$sort: {foo: 1}}, {$limit: 100}],
expectedStages: ["SORT", "COLLSCAN", "$graphLookup"],
optimizedAwayStages: ["LIMIT", "$limit"],
+ expectedOutput: [
+ {
+ "_id": 1,
+ "from": "a",
+ "foo": 1,
+ "out": [
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 2, "from": "b", "to": "a", "foo": 2},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3}
+ ]
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": [
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3}
+ ]
+ },
+ {
+ "_id": 3,
+ "from": "c",
+ "to": "b",
+ "foo": 3,
+ "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}]
+ },
+ {
+ "_id": 4,
+ "from": "d",
+ "to": "b",
+ "foo": 4,
+ "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}]
+ },
+ {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []}
+ ],
msg: "$graphLookup should swap with $limit and $sort, and $sort should absorb $limit if " +
"there is no internal $unwind"
});
assertStagesAndOutput({
- pipeline: [graphLookup, {$sort: {out: 1}}],
+ pipeline: [graphLookup, {$sort: {out: 1, foo: 1}}],
expectedStages: ["COLLSCAN", "$graphLookup", "$sort"],
+ expectedOutput: [
+ {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []},
+ {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []},
+ {
+ "_id": 1,
+ "from": "a",
+ "foo": 1,
+ "out": [
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 2, "from": "b", "to": "a", "foo": 2},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3}
+ ]
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": [
+ {"_id": 6, "from": "f", "to": "d", "foo": 6},
+ {"_id": 4, "from": "d", "to": "b", "foo": 4},
+ {"_id": 5, "from": "e", "to": "c", "foo": 5},
+ {"_id": 3, "from": "c", "to": "b", "foo": 3}
+ ]
+ },
+ {
+ "_id": 3,
+ "from": "c",
+ "to": "b",
+ "foo": 3,
+ "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}]
+ },
+ {
+ "_id": 4,
+ "from": "d",
+ "to": "b",
+ "foo": 4,
+ "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}]
+ }
+ ],
msg: "$graphLookup should not swap with $sort if sort uses fields created by $graphLookup"
});
assertStagesAndOutput({
pipeline: [graphLookup, {$unwind: "$out"}, {$sort: {foo: 1}}],
expectedStages: ["COLLSCAN", "$graphLookup", "$sort"],
+ expectedOutput: [
+ {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 6, "from": "f", "to": "d", "foo": 6}},
+ {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 2, "from": "b", "to": "a", "foo": 2}},
+ {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 4, "from": "d", "to": "b", "foo": 4}},
+ {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 5, "from": "e", "to": "c", "foo": 5}},
+ {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 3, "from": "c", "to": "b", "foo": 3}},
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": {"_id": 6, "from": "f", "to": "d", "foo": 6}
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": {"_id": 4, "from": "d", "to": "b", "foo": 4}
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": {"_id": 5, "from": "e", "to": "c", "foo": 5}
+ },
+ {
+ "_id": 2,
+ "from": "b",
+ "to": "a",
+ "foo": 2,
+ "out": {"_id": 3, "from": "c", "to": "b", "foo": 3}
+ },
+ {
+ "_id": 3,
+ "from": "c",
+ "to": "b",
+ "foo": 3,
+ "out": {"_id": 5, "from": "e", "to": "c", "foo": 5}
+ },
+ {
+ "_id": 4,
+ "from": "d",
+ "to": "b",
+ "foo": 4,
+ "out": {"_id": 6, "from": "f", "to": "d", "foo": 6}
+ }
+ ],
msg: "$graphLookup with an internal $unwind should not swap with $sort",
fieldsToSkip: ["out"]
-});
-
-// Reset optimization mode.
-setPipelineOptimizationMode(oldMode);
-})();
+});
\ No newline at end of file
diff --git a/jstests/aggregation/sources/group/group_large_documents.js b/jstests/aggregation/sources/group/group_large_documents.js
index 324da85e5d6c2..d15ffccedd2f7 100644
--- a/jstests/aggregation/sources/group/group_large_documents.js
+++ b/jstests/aggregation/sources/group/group_large_documents.js
@@ -34,4 +34,4 @@ for (let preventProjectPushdown of [false, true]) {
assert(arrayEq(results, [{a: 2}]),
"Pipeline:\n" + tojson(pipeline) + "Actual results:\n" + tojson(results));
}
-}());
\ No newline at end of file
+}());
diff --git a/jstests/aggregation/sources/indexStats/verify_index_stats_output.js b/jstests/aggregation/sources/indexStats/verify_index_stats_output.js
index f19ee681b60b5..c8d7300bef61d 100644
--- a/jstests/aggregation/sources/indexStats/verify_index_stats_output.js
+++ b/jstests/aggregation/sources/indexStats/verify_index_stats_output.js
@@ -110,4 +110,4 @@ let finishedOutput = coll.aggregate([{$indexStats: {}}, {$match: {name: indexNam
for (const indexStats of finishedOutput) {
assert(!indexStats.hasOwnProperty("building"), tojson(indexStats));
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/sources/lookup/lookup_collation.js b/jstests/aggregation/sources/lookup/lookup_collation.js
index 4c23cb4a54287..1eb834a314c68 100644
--- a/jstests/aggregation/sources/lookup/lookup_collation.js
+++ b/jstests/aggregation/sources/lookup/lookup_collation.js
@@ -13,11 +13,7 @@
* 2. 'collation' option overrides local collection's collation
*/
load("jstests/aggregation/extras/utils.js"); // For anyEq.
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages, getWinningPlan.
-
-(function() {
-
-"use strict";
+import {getWinningPlan, getAggPlanStages} from "jstests/libs/analyze_plan.js";
const testDB = db.getSiblingDB(jsTestName());
assert.commandWorked(testDB.dropDatabase());
@@ -217,4 +213,3 @@ let explain;
assertIndexJoinStrategy(explain);
}
})();
-})();
diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js
index c490314901387..58ad84899bf2d 100644
--- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js
+++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js
@@ -1,15 +1,15 @@
/**
* Tests for $lookup with localField/foreignField syntax using hash join algorithm.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests.
+import {
+ JoinAlgorithm,
+ runTests
+} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping the test because it only applies to $lookup in SBE");
- return;
+ quit();
}
runTests({
@@ -17,4 +17,3 @@ runTests({
foreignColl: db.lookup_arrays_semantics_foreign_hj,
currentJoinAlgorithm: JoinAlgorithm.HJ
});
-})();
diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js
index aca8b222acea6..fde36478984e3 100644
--- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js
+++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js
@@ -1,11 +1,12 @@
/**
* Tests for $lookup with localField/foreignField syntax using indexed nested loop join algorithm.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests and
- // runTest_*.
+import {
+ JoinAlgorithm,
+ runTest_SingleForeignRecord,
+ runTest_SingleLocalRecord,
+ runTests,
+} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js";
/**
* Run the tests with sorted ascending/descending indexes.
@@ -148,4 +149,3 @@ runTests({
});
})();
})();
-})();
diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js
index b4016587fb651..2b2679ce4cfe4 100644
--- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js
+++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js
@@ -15,10 +15,10 @@
*/
load("jstests/aggregation/extras/utils.js");
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages()'
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
-const JoinAlgorithm = {
+export const JoinAlgorithm = {
HJ: {name: "HJ", strategy: "HashJoin"},
NLJ: {name: "NLJ", strategy: "NestedLoopJoin"},
INLJ_Asc: {name: "INLJ_Asc", indexType: 1, strategy: "IndexedLoopJoin"},
@@ -26,7 +26,7 @@ const JoinAlgorithm = {
INLJ_Hashed: {name: "INLJ_Hashed", indexType: "hashed", strategy: "IndexedLoopJoin"}
};
-function setupCollections(testConfig, localRecords, foreignRecords, foreignField) {
+export function setupCollections(testConfig, localRecords, foreignRecords, foreignField) {
const {localColl, foreignColl, currentJoinAlgorithm} = testConfig;
localColl.drop();
assert.commandWorked(localColl.insert(localRecords));
@@ -44,7 +44,7 @@ function setupCollections(testConfig, localRecords, foreignRecords, foreignField
* Checks that the expected join algorithm has been used (to sanity check that the tests do provide
* the intended coverage).
*/
-function checkJoinConfiguration(testConfig, explain) {
+export function checkJoinConfiguration(testConfig, explain) {
const {currentJoinAlgorithm} = testConfig;
const eqLookupNodes = getAggPlanStages(explain, "EQ_LOOKUP");
if (checkSBEEnabled(db)) {
@@ -66,7 +66,7 @@ function checkJoinConfiguration(testConfig, explain) {
* content of the "as" field but only that it's not empty for local records with ids in
* 'idsExpectToMatch'.
*/
-function runTest_SingleForeignRecord(
+export function runTest_SingleForeignRecord(
testConfig,
{testDescription, localRecords, localField, foreignRecord, foreignField, idsExpectedToMatch}) {
const {localColl, foreignColl, currentJoinAlgorithm} = testConfig;
@@ -115,7 +115,7 @@ function runTest_SingleForeignRecord(
* Executes $lookup with exactly one record in the local collection and checks that the "as" field
* for it contains documents with ids from `idsExpectedToMatch`.
*/
-function runTest_SingleLocalRecord(
+export function runTest_SingleLocalRecord(
testConfig,
{testDescription, localRecord, localField, foreignRecords, foreignField, idsExpectedToMatch}) {
const {localColl, foreignColl, currentJoinAlgorithm} = testConfig;
@@ -154,7 +154,7 @@ function runTest_SingleLocalRecord(
/**
* Executes $lookup and expects it to fail with the specified 'expectedErrorCode`.
*/
-function runTest_ExpectFailure(
+export function runTest_ExpectFailure(
testConfig,
{testDescription, localRecords, localField, foreignRecords, foreignField, expectedErrorCode}) {
const {localColl, foreignColl, currentJoinAlgorithm} = testConfig;
@@ -182,7 +182,7 @@ function runTest_ExpectFailure(
/**
* Tests.
*/
-function runTests(testConfig) {
+export function runTests(testConfig) {
const {localColl, foreignColl, currentJoinAlgorithm} = testConfig;
// Sanity-test that the join is configured correctly.
diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js
index 1ed304a0a18da..e5f711b3a8697 100644
--- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js
+++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js
@@ -1,14 +1,13 @@
/**
* Tests for $lookup with localField/foreignField syntax using nested loop join algorithm.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests.
+import {
+ JoinAlgorithm,
+ runTests
+} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js";
runTests({
localColl: db.lookup_arrays_semantics_local_nlj,
foreignColl: db.lookup_arrays_semantics_foreign_nlj,
currentJoinAlgorithm: JoinAlgorithm.NLJ
});
-})();
diff --git a/jstests/aggregation/sources/lookup/lookup_large_documents.js b/jstests/aggregation/sources/lookup/lookup_large_documents.js
index f645fbbdc2613..9778b04dc79ea 100644
--- a/jstests/aggregation/sources/lookup/lookup_large_documents.js
+++ b/jstests/aggregation/sources/lookup/lookup_large_documents.js
@@ -35,4 +35,4 @@ for (let preventProjectPushdown of [false, true]) {
assert(arrayEq(results, [{foo: 3}]),
"Pipeline:\n" + tojson(pipeline) + "Actual results:\n" + tojson(results));
}
-}());
\ No newline at end of file
+}());
diff --git a/jstests/aggregation/sources/lookup/lookup_numeric_field.js b/jstests/aggregation/sources/lookup/lookup_numeric_field.js
new file mode 100644
index 0000000000000..a40f68e961f79
--- /dev/null
+++ b/jstests/aggregation/sources/lookup/lookup_numeric_field.js
@@ -0,0 +1,256 @@
+// Tests that numeric field components in $lookup and $graphLookup arguments behave correctly. This
+// includes $lookup 'localField' and $graphLookup 'startWith', 'connectFromField', and
+// 'connectToField'.
+// @tags: [
+// # Using a column scan removes the transformBy we search for.
+// assumes_no_implicit_index_creation,
+// ]
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+
+const local = db.local;
+const foreign = db.foreign;
+
+foreign.drop();
+assert.commandWorked(foreign.insert({y: 3, z: 4}));
+
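+// Runs 'pipeline' against a local collection containing only 'localDoc' and asserts whether the
+// document matches. Also inspects the explain output to verify that the projection pushed down to
+// the find layer ("transformBy") includes the 'prefix' path.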
+function testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix) {
+ local.drop();
+ assert.commandWorked(local.insert(localDoc));
+
+ // Test correctness.
+ const results = db.local.aggregate(pipeline).toArray();
+ if (shouldMatchDoc) {
+ assert.eq(results, [{count: 1}]);
+ } else {
+ assert.eq(results.length, 0);
+ }
+
+ // Look for the transformBy.
+ const explain = db.local.explain().aggregate(pipeline);
+ const projStages = [
+ ...getAggPlanStages(explain, "PROJECTION_SIMPLE"),
+ ...getAggPlanStages(explain, "PROJECTION_DEFAULT")
+ ];
+ assert.gt(projStages.length, 0, explain);
+
+ for (const projStage of projStages) {
+ // We have the stage, now make sure we have the correct projection.
+ let transform = projStage.transformBy;
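+        // The pushed-down projection may spell the path either as a single dotted key
+        // (e.g. {"a.b": true}) or as nested sub-objects (e.g. {a: {b: true}}); handle both forms.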
+ if (transform.hasOwnProperty(prefix.join("."))) {
+ transform = transform[prefix.join(".")];
+ } else {
+ for (const field of prefix) {
+ transform = transform[field];
+ }
+ }
+ assert.eq(transform, true, explain);
+ }
+}
+
+function testLookupLocalField(localField, localDoc, shouldMatchDoc, prefix) {
+ // Some prefix of the localField argument gets pushed down to find as a "transformBy" since it's
+ // the only field we need for this pipeline.
+ // We should see
+ // {transformBy: {prefix: true, _id: false}}
+ const pipeline = [
+ {$lookup: {from: "foreign", localField: localField, foreignField: "y", as: "docs"}},
+ {$match: {"docs.0.z": 4}},
+ {$count: "count"}
+ ];
+ testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix);
+}
+
+function testGraphLookupStartsWith(localField, localDoc, shouldMatchDoc, prefix) {
+ // Similar to the lookup transformBy case, but for $graphLookup.
+ const pipeline = [
+ {$graphLookup: {
+ from: "foreign",
+ startWith: localField,
+ connectFromField: "z",
+ connectToField: "y",
+ maxDepth: 0,
+ as: "docs"
+ }},
+ {$match: {"docs.0.z": 4}},
+ {$count: "count"}
+ ];
+ testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix);
+}
+
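+// Recreates the foreign collection with 'foreignDocs', runs a $graphLookup starting from the value
+// 0 using the given 'connectFromField'/'connectToField' paths, and asserts that the traversal
+// reaches exactly 'expectedDocs' (sorted by _id).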
+function testGraphLookupToFromField(foreignDocs, fromField, toField, expectedDocs) {
+ foreign.drop();
+ assert.commandWorked(foreign.insert(foreignDocs));
+
+ const pipeline = [
+ {$graphLookup: {
+ from: "foreign",
+ startWith: 0,
+ connectFromField: fromField,
+ connectToField: toField,
+ as: "docs"
+ }},
+ {$project: {docs: {$sortArray: {input: "$docs", sortBy: {_id: 1}}}}}
+ ];
+
+ const result = local.aggregate(pipeline).toArray();
+ assert.eq(result.length, 1);
+ assert.eq(result[0].docs, expectedDocs);
+}
+
+// Test the $lookup 'localField' field.
+{
+ // Non-numeric cases shouldn't be affected.
+ testLookupLocalField("a", {a: 3}, true, ["a"]);
+ testLookupLocalField("a", {a: 1}, false, ["a"]);
+ testLookupLocalField("a.b", {a: {b: 3}}, true, ["a", "b"]);
+ testLookupLocalField("a.b.0", {a: {b: [3]}}, true, ["a", "b"]);
+
+ // Basic numeric cases.
+ testLookupLocalField("a.0", {a: [3, 2, 1]}, true, ["a"]);
+ testLookupLocalField("a.0", {a: {"0": 3, "1": 2, "3": 1}}, true, ["a"]);
+ testLookupLocalField("a.1", {a: [3, 2, 1]}, false, ["a"]);
+ testLookupLocalField("a.3", {a: [3, 2, 1]}, false, ["a"]);
+ testLookupLocalField("b.3", {a: [3, 2, 1]}, false, ["b"]);
+
+ // Consecutive numeric fields.
+ testLookupLocalField("c.1.0", {c: [0, [3, 4, 3], [1, 2]]}, true, ["c"]);
+ testLookupLocalField("c.1.2", {c: [0, [3, 4, 3], [1, 2]]}, true, ["c"]);
+ testLookupLocalField("c.0.0", {c: [0, [3, 4, 3], [1, 2]]}, false, ["c"]);
+ testLookupLocalField("b.2.1", {a: [0, [3, 4, 3], [1, 2]]}, false, ["b"]);
+
+ // Mix numeric and regular fields.
+ testLookupLocalField("a.2.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, true, ["a"]);
+ testLookupLocalField("a.2.b.1", {a: {"2": {b: [1, 3]}}}, true, ["a"]);
+ testLookupLocalField("a.2.b.2", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]);
+ testLookupLocalField("a.1.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]);
+ testLookupLocalField("a.1.b.2", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]);
+
+ // Test two regular fields then a numeric to make sure "transformBy" has "a.b" instead of just
+ // "a".
+ testLookupLocalField("a.b.0", {a: {b: [3]}}, true, ["a", "b"]);
+ testLookupLocalField("a.b.c.1", {a: {b: {c: [1, 3]}}}, true, ["a", "b", "c"]);
+
+ // Verify that $lookup does not treat 0-prefixed numeric fields as array indices.
+ testLookupLocalField("a.00", {a: [3]}, false, ["a"]);
+ testLookupLocalField("a.b.01", {a: {b: [1, 3]}}, false, ["a", "b"]);
+ testLookupLocalField("a.00.b", {a: [{b: 3}]}, false, ["a"]);
+
+ // Verify that $lookup always treats 0-prefixed numeric fields as field names.
+ testLookupLocalField("a.00", {a: {"00": 3}}, true, ["a"]);
+ testLookupLocalField("a.b.01", {a: {b: {"01": 3}}}, true, ["a", "b"]);
+ testLookupLocalField("a.00.b", {a: {"00": {b: 3}}}, true, ["a"]);
+
+ // Regular index fields shouldn't match "00"-type fields.
+ testLookupLocalField("a.0", {a: {"00": 3}}, false, ["a"]);
+ testLookupLocalField("a.b.1", {a: {b: {"01": 3}}}, false, ["a", "b"]);
+ testLookupLocalField("a.0.b", {a: {"00": {b: 3}}}, false, ["a"]);
+}
+
+// Test the $graphLookup 'startWith' field.
+{
+ // Non-numeric cases shouldn't be affected.
+ testGraphLookupStartsWith("$a", {a: 3}, true, ["a"]);
+ testGraphLookupStartsWith("$a", {a: 1}, false, ["a"]);
+ testGraphLookupStartsWith("$a.b", {a: {b: 3}}, true, ["a", "b"]);
+ testGraphLookupStartsWith("$a.b.0", {a: {b: {"0": 3}}}, true, ["a", "b", "0"]);
+ testGraphLookupStartsWith("$a.b.0", {a: {b: [{"0": 3}]}}, true, ["a", "b", "0"]);
+ testGraphLookupStartsWith("$a.b.0", {a: {b: [3]}}, false, ["a", "b", "0"]);
+ testGraphLookupStartsWith("$a.0", {a: {"0": 3}}, true, ["a", "0"]);
+ testGraphLookupStartsWith("$a.0", {a: {"0": 2}}, false, ["a", "0"]);
+ testGraphLookupStartsWith("$a.0", {a: [3, 2, 1]}, false, ["a", "0"]);
+
+ // Should traverse once.
+ testGraphLookupStartsWith("$a.0", {a: [{"0": 3}]}, true, ["a", "0"]);
+ testGraphLookupStartsWith("$a.0", {a: [[{"0": 3}]]}, false, ["a", "0"]);
+
+ // Consecutive numeric fields.
+ testGraphLookupStartsWith("$c.1.0", {c: {"1": {"0": 3}}}, true, ["c", "1", "0"]);
+ testGraphLookupStartsWith("$c.1.0", {c: {"01": {"0": 3}}}, false, ["c", "1", "0"]);
+ testGraphLookupStartsWith("$c.1.0", {c: {"1": {"00": 3}}}, false, ["c", "1", "0"]);
+ testGraphLookupStartsWith("$c.1.0", {c: {"0": {"1": 3}}}, false, ["c", "1", "0"]);
+
+ // Mix numeric and regular fields.
+ testGraphLookupStartsWith("$a.2.b.1", {a: {"2": {b: {"1": 3}}}}, true, ["a", "2", "b", "1"]);
+ testGraphLookupStartsWith(
+ "$a.2.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a", "2", "b", "1"]);
+
+ testGraphLookupStartsWith("$a.00", {a: {"00": 3}}, true, ["a", "00"]);
+ testGraphLookupStartsWith("$a.00", {a: [{"00": 3}]}, true, ["a", "00"]);
+ testGraphLookupStartsWith("$a.00", {a: {"00": [3]}}, true, ["a", "00"]);
+ testGraphLookupStartsWith("$a.00", {a: [{"00": [3]}]}, false, ["a", "00"]);
+ testGraphLookupStartsWith("$a.00", {a: [3]}, false, ["a", "00"]);
+}
+
+local.drop();
+foreign.drop();
+
+assert.commandWorked(local.insert({_id: 0}));
+
+// Test the $graphLookup 'connectFromField' field.
+const fromSpecs = [
+ // Finding a value of "1" should match the next document.
+ {singleField: "0", doubleField: "00", array: [1, 2]},
+ {singleField: "1", doubleField: "01", array: [2, 1]}
+];
+for (const spec of fromSpecs) {
+ // "00"-type fields should act as field names.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}],
+ "from." + spec.doubleField,
+ "to",
+ [{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}]);
+ // "00"-type fields should not act as an index into an array.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}],
+ "from." + spec.doubleField,
+ "to",
+ [{_id: 1, to: 0, from: spec.array}]);
+ // Regular numeric fields should not match "00"-type fields.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}],
+ "from." + spec.singleField,
+ "to",
+ [{_id: 1, to: 0, from: {[spec.doubleField]: 1}}]);
+ // Regular numeric fields can act as an array index.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}],
+ "from." + spec.singleField,
+ "to",
+ [{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}]);
+ // "00"-type fields should not match "0"-type field names.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}],
+ "from." + spec.doubleField,
+ "to",
+ [{_id: 1, to: 0, from: {[spec.singleField]: 1}}]);
+ // Regular numeric fields can match themselves as field names.
+ testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}],
+ "from." + spec.singleField,
+ "to",
+ [{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}]);
+}
+
+// Test the $graphLookup 'connectToField' field.
+const toSpecs = [
+ // Finding a value of "0" should match the document.
+ {singleField: "0", doubleField: "00", array: [0, 2]},
+ {singleField: "1", doubleField: "01", array: [2, 0]}
+];
+for (const spec of toSpecs) {
+ // "00"-type fields should act as field names.
+ testGraphLookupToFromField([{_id: 1, to: {[spec.doubleField]: 0}}],
+ "from",
+ "to." + spec.doubleField,
+ [{_id: 1, to: {[spec.doubleField]: 0}}]);
+ // "00"-type fields should not act as an index into an array.
+ testGraphLookupToFromField([{_id: 1, to: spec.array}], "from", "to." + spec.doubleField, []);
+ // Regular numeric fields should not match "00"-type fields.
+ testGraphLookupToFromField(
+ [{_id: 1, to: {[spec.doubleField]: 0}}], "from", "to." + spec.singleField, []);
+ // Regular numeric fields can act as an array index.
+ testGraphLookupToFromField(
+ [{_id: 1, to: spec.array}], "from", "to." + spec.singleField, [{_id: 1, to: spec.array}]);
+ // "00"-type fields should not match "0"-type field names.
+ testGraphLookupToFromField(
+ [{_id: 1, to: {[spec.singleField]: 0}}], "from", "to." + spec.doubleField, []);
+ // Regular numeric fields can match themselves as field names.
+ testGraphLookupToFromField([{_id: 1, to: {[spec.singleField]: 0}}],
+ "from",
+ "to." + spec.singleField,
+ [{_id: 1, to: {[spec.singleField]: 0}}]);
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/lookup/lookup_query_stats.js b/jstests/aggregation/sources/lookup/lookup_query_stats.js
index 8c598b6c2f2d9..8f413d2652877 100644
--- a/jstests/aggregation/sources/lookup/lookup_query_stats.js
+++ b/jstests/aggregation/sources/lookup/lookup_query_stats.js
@@ -14,13 +14,12 @@
* requires_pipeline_optimization
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages'
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages and
- // getQueryInfoAtTopLevelOrFirstStage.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+import {
+ getQueryInfoAtTopLevelOrFirstStage,
+ getSbePlanStages
+} from "jstests/libs/sbe_explain_helpers.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSBELookupEnabled = checkSBEEnabled(db);
const testDB = db.getSiblingDB("lookup_query_stats");
@@ -404,4 +403,3 @@ testQueryExecutorStatsWithIndexScan({withUnwind: false});
// taking place within the lookup stage.
testQueryExecutorStatsWithCollectionScan({withUnwind: true});
testQueryExecutorStatsWithIndexScan({withUnwind: true});
-}());
diff --git a/jstests/aggregation/sources/lookup/lookup_sort_limit.js b/jstests/aggregation/sources/lookup/lookup_sort_limit.js
index f100c4bb66c81..58f29ba9728bb 100644
--- a/jstests/aggregation/sources/lookup/lookup_sort_limit.js
+++ b/jstests/aggregation/sources/lookup/lookup_sort_limit.js
@@ -2,11 +2,6 @@
* Test that a $lookup correctly optimizes a foreign pipeline containing a $sort and a $limit. This
* test is designed to reproduce SERVER-36715.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-
const testDB = db.getSiblingDB("lookup_sort_limit");
testDB.dropDatabase();
@@ -45,5 +40,4 @@ res = localColl
}])
.toArray();
-assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]);
-}());
+assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/lookup/profile_lookup.js b/jstests/aggregation/sources/lookup/profile_lookup.js
index edc94e0b99079..0b77b137a10a5 100644
--- a/jstests/aggregation/sources/lookup/profile_lookup.js
+++ b/jstests/aggregation/sources/lookup/profile_lookup.js
@@ -4,10 +4,7 @@
// requires_profiling,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const localColl = db.local;
const foreignColl = db.foreign;
@@ -53,5 +50,4 @@ const eqLookupNodes = getAggPlanStages(localColl.explain().aggregate(pipeline),
if (eqLookupNodes.length === 0) {
expectedCount += 3;
}
-assert.eq(expectedCount, actualCount);
-}());
+assert.eq(expectedCount, actualCount);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/match/trivial_match_expr.js b/jstests/aggregation/sources/match/trivial_match_expr.js
index 93be40bfeeaac..dcfd05148f498 100644
--- a/jstests/aggregation/sources/match/trivial_match_expr.js
+++ b/jstests/aggregation/sources/match/trivial_match_expr.js
@@ -10,10 +10,7 @@
// # Explicitly testing optimization.
// requires_pipeline_optimization,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.trivial_match_expr;
coll.drop();
@@ -85,5 +82,4 @@ const explainFind = coll.explain().find({$and: [{$expr: "foo"}, {$expr: "$foo"}]
assert.eq(getWinningPlan(explainFind.queryPlanner).filter,
{$expr: "$foo"},
"$expr truthy constant expression should be optimized away when used " +
- "in conjunction with $expr containing non-constant expression");
-})();
+ "in conjunction with $expr containing non-constant expression");
\ No newline at end of file
diff --git a/jstests/aggregation/sources/merge/merge_to_same_collection.js b/jstests/aggregation/sources/merge/merge_to_same_collection.js
index 4e90036f64636..cc2fd7940a253 100644
--- a/jstests/aggregation/sources/merge/merge_to_same_collection.js
+++ b/jstests/aggregation/sources/merge/merge_to_same_collection.js
@@ -30,4 +30,4 @@ assert.doesNotThrow(() => coll.aggregate(pipeline));
assertArrayEq(
{actual: coll.find().toArray(), expected: [{_id: 0, a: 3}, {_id: 1, a: 1}, {_id: 2, a: 2}]});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/aggregation/sources/merge/merge_with_dollar_fields.js b/jstests/aggregation/sources/merge/merge_with_dollar_fields.js
new file mode 100644
index 0000000000000..b79b3a278dd6e
--- /dev/null
+++ b/jstests/aggregation/sources/merge/merge_with_dollar_fields.js
@@ -0,0 +1,135 @@
+// Tests $merge over documents that contain a $-prefixed field.
+//
+// Sharded collections have special requirements on the join field.
+// @tags: [assumes_unsharded_collection]
+
+(function() {
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+
+const sourceName = 'merge_with_dollar_fields_source';
+const source = db[sourceName];
+const targetName = 'merge_with_dollar_fields_target';
+const target = db[targetName];
+
+const joinField = 'joinField';
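+
+// The source document deliberately contains a $-prefixed field so that merging it into the target
+// exercises the server's handling of dollar-prefixed field names.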
+const sourceDoc = {
+ $dollar: 1,
+ joinField
+};
+const targetDoc = {
+ a: 1,
+ joinField
+};
+assertDropCollection(db, sourceName);
+assert.commandWorked(source.insert(sourceDoc));
+
+function runTest({whenMatched, whenNotMatched}, targetDocs) {
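+    // Recreate the target collection seeded with 'targetDocs', run $merge from the source
+    // collection with the given modes, and return the resulting target document (if any) for
+    // inspection.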
+ assertDropCollection(db, targetName);
+ assert.commandWorked(target.createIndex({joinField: 1}, {unique: true}));
+ assert.commandWorked(target.insert(targetDocs));
+ source.aggregate([
+ {$project: {_id: 0}},
+ {
+ $merge: {
+ into: targetName,
+ on: joinField,
+ whenMatched,
+ whenNotMatched,
+ }
+ }
+ ]);
+ return target.findOne({}, {_id: 0});
+}
+
+function runTestMatched(mode) {
+ return runTest(mode, [targetDoc]);
+}
+
+function runTestNotMatched(mode) {
+ return runTest(mode, []);
+}
+
+// TODO SERVER-76999: Currently $merge may throw a 'FailedToParse' error due to non-local updates.
+// We should return consistent results for documents containing $-prefixed fields.
+
+// whenMatched: 'replace', whenNotMatched: 'insert'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'insert'}),
+ [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]);
+
+try {
+ assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'insert'}));
+} catch (error) {
+ assert.commandFailedWithCode(error, ErrorCodes.FailedToParse);
+}
+
+// whenMatched: 'replace', whenNotMatched: 'fail'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'fail'}),
+ [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]);
+
+assert.throwsWithCode(() => runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'fail'}),
+ [ErrorCodes.MergeStageNoMatchingDocument, ErrorCodes.FailedToParse]);
+
+// whenMatched: 'replace', whenNotMatched: 'discard'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'discard'}),
+ [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]);
+
+try {
+ assert.eq(null, runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'discard'}));
+} catch (error) {
+ assert.commandFailedWithCode(error, ErrorCodes.FailedToParse);
+}
+
+// whenMatched: 'merge', whenNotMatched: 'insert'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'insert'}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'insert'}));
+
+// whenMatched: 'merge', whenNotMatched: 'fail'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'fail'}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+assert.throwsWithCode(() => runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'fail'}),
+ ErrorCodes.MergeStageNoMatchingDocument);
+
+// whenMatched: 'merge', whenNotMatched: 'discard'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'discard'}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+assert.eq(null, runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'discard'}));
+
+// whenMatched: 'keepExisting', whenNotMatched: 'insert'
+assert.docEq(targetDoc, runTestMatched({whenMatched: 'keepExisting', whenNotMatched: 'insert'}));
+
+assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'keepExisting', whenNotMatched: 'insert'}));
+
+// whenMatched: 'fail', whenNotMatched: 'insert'
+assert.throwsWithCode(() => runTestMatched({whenMatched: 'fail', whenNotMatched: 'insert'}),
+ ErrorCodes.DuplicateKey);
+
+assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'fail', whenNotMatched: 'insert'}));
+
+// whenMatched: 'pipeline', whenNotMatched: 'insert'
+const pipeline = [{$addFields: {b: 1}}];
+const targetDocAddFields = {
+ ...targetDoc,
+ b: 1
+};
+assert.docEq(targetDocAddFields, runTestMatched({whenMatched: pipeline, whenNotMatched: 'insert'}));
+
+assert.docEq(sourceDoc, runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'insert'}));
+
+// whenMatched: 'pipeline', whenNotMatched: 'fail'
+assert.docEq(targetDocAddFields, runTestMatched({whenMatched: pipeline, whenNotMatched: 'fail'}));
+
+assert.throwsWithCode(() => runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'fail'}),
+ ErrorCodes.MergeStageNoMatchingDocument);
+
+// whenMatched: 'pipeline', whenNotMatched: 'discard'
+assert.docEq(targetDocAddFields,
+ runTestMatched({whenMatched: pipeline, whenNotMatched: 'discard'}));
+
+assert.eq(null, runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'discard'}));
+}());
diff --git a/jstests/aggregation/sources/project/remove_redundant_projects.js b/jstests/aggregation/sources/project/remove_redundant_projects.js
index 512efdd254602..ae19b320b5435 100644
--- a/jstests/aggregation/sources/project/remove_redundant_projects.js
+++ b/jstests/aggregation/sources/project/remove_redundant_projects.js
@@ -4,12 +4,14 @@
// do_not_wrap_aggregations_in_facets,
// requires_pipeline_optimization,
// ]
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
-load('jstests/libs/analyze_plan.js'); // For planHasStage().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {
+ getWinningPlan,
+ planHasStage,
+ isAggregationPlan,
+ isQueryPlan
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
let coll = db.remove_redundant_projects;
coll.drop();
@@ -175,5 +177,4 @@ assertResultsMatch({
index: indexSpec,
pipelineOptimizedAway: true,
removedProjectStage: {'_id.a': 1},
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/add_to_set.js b/jstests/aggregation/sources/setWindowFields/add_to_set.js
index a6370f4c46ef3..ebf6fbe90d38d 100644
--- a/jstests/aggregation/sources/setWindowFields/add_to_set.js
+++ b/jstests/aggregation/sources/setWindowFields/add_to_set.js
@@ -1,10 +1,10 @@
/**
* Test that $addToSet works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -14,5 +14,4 @@ const nDocsPerTicker = 10;
seedWithTickerData(coll, nDocsPerTicker);
// Run the suite of partition and bounds tests against the $addToSet function.
-testAccumAgainstGroup(coll, "$addToSet", []);
-})();
+testAccumAgainstGroup(coll, "$addToSet", []);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/avg.js b/jstests/aggregation/sources/setWindowFields/avg.js
index da5ee8925ce6c..a48b5c0114386 100644
--- a/jstests/aggregation/sources/setWindowFields/avg.js
+++ b/jstests/aggregation/sources/setWindowFields/avg.js
@@ -1,10 +1,11 @@
/**
* Test that $avg works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ computeAsGroup,
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -53,5 +54,4 @@ for (let index = 0; index < results.length; index++) {
defaultValue: null
});
assert.eq(groupRes, results[index].runningAvgLead);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/count.js b/jstests/aggregation/sources/setWindowFields/count.js
index 13fbe83c537a7..89e2245704f60 100644
--- a/jstests/aggregation/sources/setWindowFields/count.js
+++ b/jstests/aggregation/sources/setWindowFields/count.js
@@ -124,4 +124,4 @@ verifyResults(result, function(num, baseObj) {
}
return baseObj;
});
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/sources/setWindowFields/covariance.js b/jstests/aggregation/sources/setWindowFields/covariance.js
index 8f2b3e2319458..7dd617b39d573 100644
--- a/jstests/aggregation/sources/setWindowFields/covariance.js
+++ b/jstests/aggregation/sources/setWindowFields/covariance.js
@@ -1,10 +1,7 @@
/**
* Test that $covariance(Pop/Samp) works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {documentBounds} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -137,5 +134,4 @@ function compareCovarianceOfflineAndOnline(bounds) {
}
// Test various type of window.
-documentBounds.forEach(compareCovarianceOfflineAndOnline);
-})();
+documentBounds.forEach(compareCovarianceOfflineAndOnline);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js b/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js
index a7d0028583f4c..f9987b9a6daff 100644
--- a/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js
+++ b/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js
@@ -1,10 +1,7 @@
/**
* Test that exponential moving average works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {seedWithTickerData} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -285,5 +282,4 @@ assert.commandWorked(db.runCommand({
},
],
cursor: {},
-}));
-})();
+}));
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/explain.js b/jstests/aggregation/sources/setWindowFields/explain.js
index cae112cb9e5e9..9650493135d49 100644
--- a/jstests/aggregation/sources/setWindowFields/explain.js
+++ b/jstests/aggregation/sources/setWindowFields/explain.js
@@ -6,10 +6,7 @@
*
* @tags: [assumes_against_mongod_not_mongos]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db[jsTestName()];
coll.drop();
@@ -213,5 +210,4 @@ function checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotalMe
checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotal, "executionStats");
checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotal, "allPlansExecution");
-})();
-}());
+})();
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/first.js b/jstests/aggregation/sources/setWindowFields/first.js
index 599bddf35cd08..93b71f0702e9d 100644
--- a/jstests/aggregation/sources/setWindowFields/first.js
+++ b/jstests/aggregation/sources/setWindowFields/first.js
@@ -1,10 +1,10 @@
/**
* Test the behavior of $first.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -90,5 +90,4 @@ result = coll.runCommand({
]
}
});
-assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object");
-})();
+assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object");
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/integral.js b/jstests/aggregation/sources/setWindowFields/integral.js
index bb0e962c504e4..42aa154486e6d 100644
--- a/jstests/aggregation/sources/setWindowFields/integral.js
+++ b/jstests/aggregation/sources/setWindowFields/integral.js
@@ -1,10 +1,7 @@
/**
* Test the behavior of $integral.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {documentBounds} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db.setWindowFields_integral;
@@ -270,5 +267,4 @@ assert.sameMembers(runRangeBasedIntegral([-6, 6]), [
{time: ISODate("2020-01-01T00:00:10.000Z"), y: 5.6, integral: 24.0},
// Empty window.
{time: ISODate("2020-01-01T00:00:18.000Z"), y: 6.8, integral: 0.0},
-]);
-})();
+]);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/last.js b/jstests/aggregation/sources/setWindowFields/last.js
index 6cd69979add76..f5fc3e74db3b7 100644
--- a/jstests/aggregation/sources/setWindowFields/last.js
+++ b/jstests/aggregation/sources/setWindowFields/last.js
@@ -1,10 +1,10 @@
/**
* Test the behavior of $last.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -90,5 +90,4 @@ result = coll.runCommand({
]
}
});
-assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object");
-})();
+assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object");
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/linear_fill.js b/jstests/aggregation/sources/setWindowFields/linear_fill.js
index 2f742db70f215..ac0a9c7507606 100644
--- a/jstests/aggregation/sources/setWindowFields/linear_fill.js
+++ b/jstests/aggregation/sources/setWindowFields/linear_fill.js
@@ -4,12 +4,7 @@
* requires_fcv_52,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
const coll = db.linear_fill;
coll.drop();
@@ -516,5 +511,4 @@ assert.commandFailedWithCode(db.runCommand({
],
cursor: {}
}),
- ErrorCodes.TypeMismatch);
-})();
+ ErrorCodes.TypeMismatch);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/locf.js b/jstests/aggregation/sources/setWindowFields/locf.js
index dcc8ddb6d4cd5..1995803f100b2 100644
--- a/jstests/aggregation/sources/setWindowFields/locf.js
+++ b/jstests/aggregation/sources/setWindowFields/locf.js
@@ -5,12 +5,7 @@
* requires_fcv_52,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
const coll = db[jsTestName()];
coll.drop();
@@ -152,5 +147,4 @@ result = coll.aggregate([{
}])
.toArray();
-assertArrayEq({actual: result, expected: expected});
-})();
+assertArrayEq({actual: result, expected: expected});
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/min_max.js b/jstests/aggregation/sources/setWindowFields/min_max.js
index d15d6e119913f..457bd3612d412 100644
--- a/jstests/aggregation/sources/setWindowFields/min_max.js
+++ b/jstests/aggregation/sources/setWindowFields/min_max.js
@@ -1,10 +1,10 @@
/**
* Test that $min/max works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -38,5 +38,4 @@ let results =
for (let index = 0; index < results.length; index++) {
assert.eq("hiya", results[index].minStr);
assert.eq("hiya", results[index].maxStr);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/n_accumulators.js b/jstests/aggregation/sources/setWindowFields/n_accumulators.js
index 4fb2c1242be69..1fa1244ab3db5 100644
--- a/jstests/aggregation/sources/setWindowFields/n_accumulators.js
+++ b/jstests/aggregation/sources/setWindowFields/n_accumulators.js
@@ -1,10 +1,10 @@
/**
* Test that the 'n' family of accumulators work as window functions.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -138,5 +138,4 @@ for (const acc of Object.keys(nAccumulators)) {
// Missing sortBy.
testError({[acc]: {output}, window: {documents: [-1, 1]}}, 5788005);
}
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/optimize.js b/jstests/aggregation/sources/setWindowFields/optimize.js
index 0b250c2980fe6..b08138b94e7c9 100644
--- a/jstests/aggregation/sources/setWindowFields/optimize.js
+++ b/jstests/aggregation/sources/setWindowFields/optimize.js
@@ -7,10 +7,7 @@
* requires_pipeline_optimization,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
+import {aggPlanHasStage, getAggPlanStages} from "jstests/libs/analyze_plan.js";
// Find how many stages of the plan are 'stageName'.
function numberOfStages(explain, stageName) {
@@ -224,5 +221,4 @@ const explain13 = coll.explain().aggregate([
},
{$sort: {a: {$meta: "textScore"}}},
]);
-assert.eq(2, numberOfStages(explain13, '$sort'), explain13);
-})();
+assert.eq(2, numberOfStages(explain13, '$sort'), explain13);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/partition.js b/jstests/aggregation/sources/setWindowFields/partition.js
index db0b5bd39dc25..dd5e08c168ae9 100644
--- a/jstests/aggregation/sources/setWindowFields/partition.js
+++ b/jstests/aggregation/sources/setWindowFields/partition.js
@@ -88,4 +88,4 @@ assert(resultsEq(res.toArray(), [
{int_field: 0, count: 1},
{other_field: 0, count: 1}
]));
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/sources/setWindowFields/percentile.js b/jstests/aggregation/sources/setWindowFields/percentile.js
index 2121d9c7e5c47..7447f72c904c6 100644
--- a/jstests/aggregation/sources/setWindowFields/percentile.js
+++ b/jstests/aggregation/sources/setWindowFields/percentile.js
@@ -2,13 +2,12 @@
* Test that $percentile and $median work as window functions.
* @tags: [
* requires_fcv_70,
- * featureFlagApproxPercentiles
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -30,20 +29,22 @@ testAccumAgainstGroup(
coll, "$percentile", [null, null], {p: [0.1, 0.6], input: "$price", method: "approximate"});
testAccumAgainstGroup(coll, "$median", null, {input: "$price", method: "approximate"});
-function runSetWindowStage(percentileSpec, medianSpec) {
+function runSetWindowStage(percentileSpec, medianSpec, letSpec) {
return coll
- .aggregate([
- {$addFields: {str: "hiya"}},
- {
- $setWindowFields: {
- sortBy: {_id: 1},
- output: {
- runningPercentile: percentileSpec,
- runningMedian: medianSpec,
+ .aggregate(
+ [
+ {$addFields: {str: "hiya"}},
+ {
+ $setWindowFields: {
+ sortBy: {_id: 1},
+ output: {
+ runningPercentile: percentileSpec,
+ runningMedian: medianSpec,
+ }
}
}
- }
- ])
+ ],
+ {let : letSpec})
.toArray();
}
@@ -69,6 +70,27 @@ results =
assertResultEqToVal(
{resultArray: results, percentile: [minDoc.price, maxDoc.price], median: medianDoc.price});
+// Test that an expression can be used for 'input'.
+results = runSetWindowStage(
+ {$percentile: {p: [0.01, 0.99], input: {$add: [42, "$price"]}, method: "approximate"}},
+ {$median: {input: {$add: [42, "$price"]}, method: "approximate"}});
+// Since our percentiles are 0.01 and 0.99 and our collection is small, we will always return the
+// minimum and maximum value in the collection.
+assertResultEqToVal({
+ resultArray: results,
+ percentile: [42 + minDoc.price, 42 + maxDoc.price],
+ median: 42 + medianDoc.price
+});
+
+// Test that a variable can be used for 'p'.
+results = runSetWindowStage({$percentile: {p: "$$ps", input: "$price", method: "approximate"}},
+ {$median: {input: "$price", method: "approximate"}},
+ {ps: [0.01, 0.99]});
+// Since our percentiles are 0.01 and 0.99 and our collection is small, we will always return the
+// minimum and maximum value in the collection.
+assertResultEqToVal(
+ {resultArray: results, percentile: [minDoc.price, maxDoc.price], median: medianDoc.price});
+
// Test that a removable window calculates $percentile and $median correctly using an approximate
// method.
results = runSetWindowStage(
@@ -85,14 +107,15 @@ for (let index = 0; index < results.length; index++) {
assert.eq(minVal, results[index].runningMedian, results[index]);
}
-function testError(percentileSpec, expectedCode) {
+function testError(percentileSpec, expectedCode, letSpec) {
assert.throwsWithCode(() => coll.aggregate([{
- $setWindowFields: {
- partitionBy: "$ticket",
- sortBy: {ts: 1},
- output: {outputField: percentileSpec},
- }
- }]),
+ $setWindowFields: {
+ partitionBy: "$ticket",
+ sortBy: {ts: 1},
+ output: {outputField: percentileSpec},
+ }
+ }],
+ {let : letSpec}),
expectedCode);
}
@@ -118,9 +141,20 @@ testError({$median: "not an object"}, 7436100);
testError({$percentile: {p: [0.1, 0.6], input: "$str", method: false}}, ErrorCodes.TypeMismatch);
testError({$median: {input: "$str", method: false}}, ErrorCodes.TypeMismatch);
+testError({$percentile: {p: [0.1, 0.6], input: "$str", method: "discrete"}}, ErrorCodes.BadValue);
+testError({$median: {input: "$str", method: "discrete"}}, ErrorCodes.BadValue);
+testError({$percentile: {p: [0.1, 0.6], input: "$str", method: "continuous"}}, ErrorCodes.BadValue);
+testError({$median: {input: "$str", method: "continuous"}}, ErrorCodes.BadValue);
+
+// Invalid expressions or variables for 'p'.
+testError({$percentile: {p: "$$ps", input: "$price", method: "continuous"}},
+ ErrorCodes.BadValue /* non-numeric 'p' value in the variable */,
+ {ps: "foo"} /* letSpec */);
+
+testError({$percentile: {p: ["$price"], input: "$str", method: "continuous"}},
+ ErrorCodes.BadValue /* non-const 'p' expression */);
testError({$percentile: {input: "$str", method: "approximate"}},
40414 /* IDL required field error */);
testError({$median: {p: [0.1, 0.6], input: "$str", method: "approximate"}},
- 40415 /* IDL unknown field error */);
-})();
+ 40415 /* IDL unknown field error */);
\ No newline at end of file
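runSetWindowStage() and testError() now thread an optional letSpec through to aggregate(), which is what lets the new cases bind 'p' to a variable such as $$ps. A minimal standalone sketch of that command shape, assuming a hypothetical 'prices' collection and variable name 'ps':

// Bind ps via the aggregate 'let' option and reference it as "$$ps" inside $percentile.
db.prices.aggregate(
    [{
        $setWindowFields: {
            sortBy: {_id: 1},
            output: {p: {$percentile: {p: "$$ps", input: "$price", method: "approximate"}}}
        }
    }],
    {let: {ps: [0.25, 0.75]}});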
diff --git a/jstests/aggregation/sources/setWindowFields/push.js b/jstests/aggregation/sources/setWindowFields/push.js
index d512def432906..ce579fd7ecf59 100644
--- a/jstests/aggregation/sources/setWindowFields/push.js
+++ b/jstests/aggregation/sources/setWindowFields/push.js
@@ -1,10 +1,10 @@
/**
* Test that $push works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -14,5 +14,4 @@ const nDocsPerTicker = 10;
seedWithTickerData(coll, nDocsPerTicker);
// Run the suite of partition and bounds tests against the $push function.
-testAccumAgainstGroup(coll, "$push", []);
-})();
+testAccumAgainstGroup(coll, "$push", []);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/range.js b/jstests/aggregation/sources/setWindowFields/range.js
index d31386761de98..350eab0d455ea 100644
--- a/jstests/aggregation/sources/setWindowFields/range.js
+++ b/jstests/aggregation/sources/setWindowFields/range.js
@@ -1,11 +1,6 @@
/**
* Test range-based window bounds.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
-
const coll = db.setWindowFields_range;
coll.drop();
@@ -275,5 +270,4 @@ const pipeline = [{
output: {min: {$min: "$temp", window: {range: [-1, 0], unit: "hour"}}}
}
}];
-assert.commandWorked(db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}}));
-})();
+assert.commandWorked(db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}}));
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/rank.js b/jstests/aggregation/sources/setWindowFields/rank.js
index 75662829d606f..34fb5b6fd0e95 100644
--- a/jstests/aggregation/sources/setWindowFields/rank.js
+++ b/jstests/aggregation/sources/setWindowFields/rank.js
@@ -110,4 +110,4 @@ verifyResults(result, function(num, baseObj) {
});
result = runRankBasedAccumulator({double: 1}, {$documentNumber: {}});
verifyResults(result, noTieFunc);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/sources/setWindowFields/shift.js b/jstests/aggregation/sources/setWindowFields/shift.js
index 6c3f38a822688..27aeaf9a6a140 100644
--- a/jstests/aggregation/sources/setWindowFields/shift.js
+++ b/jstests/aggregation/sources/setWindowFields/shift.js
@@ -334,4 +334,4 @@ assert.commandFailedWithCode(coll.runCommand({
cursor: {}
}),
ErrorCodes.FailedToParse);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js
index 2f687ae6ffc20..b2c214d181bfd 100644
--- a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js
+++ b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js
@@ -5,25 +5,25 @@
* requires_profiling,
* assumes_read_concern_unchanged,
* do_not_wrap_aggregations_in_facets,
- * featureFlagApproxPercentiles
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
-load("jstests/aggregation/extras/window_function_helpers.js");
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
load("jstests/aggregation/extras/utils.js"); // arrayEq.
load("jstests/libs/profiler.js"); // getLatestProfileEntry.
-const origParamValue = assert.commandWorked(db.adminCommand({
- getParameter: 1,
- internalDocumentSourceSetWindowFieldsMaxMemoryBytes: 1
-}))["internalDocumentSourceSetWindowFieldsMaxMemoryBytes"];
+// Doc size was found through logging the size in the SpillableCache. Partition sizes were chosen
+// arbitrarily.
+const avgDocSize = 171;
+const smallPartitionSize = 6;
+const largePartitionSize = 21;
const coll = db[jsTestName()];
-coll.drop();
+const admin = db.getSiblingDB("admin");
function checkProfilerForDiskWrite(dbToCheck, expectedFirstStage) {
if (!FixtureHelpers.isMongos(dbToCheck)) {
@@ -46,123 +46,145 @@ function resetProfiler(db) {
FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 2}});
}
-// Doc size was found through logging the size in the SpillableCache. Partition sizes were chosen
-// arbitrarily.
-let avgDocSize = 171;
-let smallPartitionSize = 6;
-let largePartitionSize = 21;
-// The number 600 was chosen by observing how much memory is required for the accumulators to run
-// on all windows (~1600 bytes).
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- avgDocSize * smallPartitionSize + 600);
-
-seedWithTickerData(coll, 10);
-
-// Run $sum test with memory limits that cause spilling to disk.
-testAccumAgainstGroup(coll, "$sum", 0);
-
-// Run a $percentile test that fails since we go over the memory limit allowed and can't spill.
-let errorPipeline = [
- {
- $setWindowFields: {
- partitionBy: "$partition",
- sortBy: {partition: 1},
- output: {
- p: {
- $percentile: {p: [0.9], input: "$price", method: "approximate"},
- window: {documents: [0, "unbounded"]}
- }
- }
+function changeSpillLimit({mode, maxDocs}) {
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: admin,
+ cmdObj: {
+ configureFailPoint: 'overrideMemoryLimitForSpill',
+ mode: mode,
+ 'data': {maxDocsBeforeSpill: maxDocs}
}
- },
- {$sort: {_id: 1}}
-];
-assert.commandFailedWithCode(
- db.runCommand(
- {aggregate: coll.getName(), pipeline: errorPipeline, allowDiskUse: false, cursor: {}}),
- 5643011);
-
-// Run $percentile test with memory limits that cause spilling to disk and assert it succeeds.
-// In the test suite below, we will run a query identical to the one that failed above.
-resetProfiler(db);
-testAccumAgainstGroup(
- coll, "$percentile", [null], {p: [0.9], input: "$price", method: "approximate"});
-// Confirm that spilling did occur.
-checkProfilerForDiskWrite(db, "$setWindowFields");
-
-// Run $median test with memory limits that cause spilling to disk.
-resetProfiler(db);
-testAccumAgainstGroup(coll, "$median", null, {input: "$price", method: "approximate"});
-// Confirm that spilling did occur.
-checkProfilerForDiskWrite(db, "$setWindowFields");
-
-// Test that a query that spills to disk succeeds across getMore requests.
-// The next test uses less memory. Reduce memory limit to ensure spilling occurs. The number 70 was
-// chosen by observing how much memory is required for the test to run.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- avgDocSize * smallPartitionSize + 70);
-resetProfiler(db);
-const wfResults =
- coll.aggregate(
- [
- {
- $setWindowFields: {
- sortBy: {_id: 1},
- output: {res: {$sum: "$price", window: {documents: ["unbounded", 5]}}}
- },
- },
- ],
- {allowDiskUse: true, cursor: {batchSize: 1}})
- .toArray();
-assert.eq(wfResults.length, 20);
-checkProfilerForDiskWrite(db, "$setWindowFields");
-
-// Test a small, in memory, partition and a larger partition that requires spilling to disk.
-coll.drop();
-// Create small partition.
-for (let i = 0; i < smallPartitionSize; i++) {
- assert.commandWorked(coll.insert({_id: i, val: i, partition: 1}));
+ });
}
-// Create large partition.
-for (let i = 0; i < largePartitionSize; i++) {
- assert.commandWorked(coll.insert({_id: i + smallPartitionSize, val: i, partition: 2}));
+
+function testSingleAccumulator(accumulator, nullValue, spec) {
+ resetProfiler(db);
+ testAccumAgainstGroup(coll, accumulator, nullValue, spec);
+ checkProfilerForDiskWrite(db, "$setWindowFields");
}
-// Run an aggregation that will keep all documents in the cache for all documents.
-resetProfiler(db);
-let results =
- coll.aggregate(
- [
- {
- $setWindowFields: {
- partitionBy: "$partition",
- sortBy: {partition: 1},
- output: {
- sum: {
- $sum: "$val",
- window: {documents: [-largePartitionSize, largePartitionSize]}
+// Assert that spilling to disk doesn't affect the correctness of different accumulators.
+function testSpillWithDifferentAccumulators() {
+ coll.drop();
+ seedWithTickerData(coll, 10);
+
+ // Spill to disk after 5 documents.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 5});
+
+ testSingleAccumulator("$sum", 0, "$price");
+ testSingleAccumulator(
+ "$percentile", [null], {p: [0.9], input: "$price", method: "approximate"});
+ testSingleAccumulator("$median", null, {input: "$price", method: "approximate"});
+
+    // Assert that spilling works across 'getMore' commands.
+ resetProfiler(db);
+ const wfResults =
+ coll.aggregate(
+ [
+ {
+ $setWindowFields: {
+ sortBy: {_id: 1},
+ output: {res: {$sum: "$price", window: {documents: ["unbounded", 5]}}}
+ },
+ },
+ ],
+ {allowDiskUse: true, cursor: {batchSize: 1}})
+ .toArray();
+ assert.eq(wfResults.length, 20);
+ checkProfilerForDiskWrite(db, "$setWindowFields");
+
+ // Turn off the failpoint for future tests.
+ changeSpillLimit({mode: 'off', maxDocs: null});
+}
+
+// Assert that a small in-memory partition and a larger partition that requires spilling to disk
+// both return correct results.
+function testSpillWithDifferentPartitions() {
+ // Spill to disk after 5 documents. This number should be less than 'smallPartitionSize'.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 5});
+
+ coll.drop();
+ // Create small partition.
+ for (let i = 0; i < smallPartitionSize; i++) {
+ assert.commandWorked(coll.insert({_id: i, val: i, partition: 1}));
+ }
+ // Create large partition.
+ for (let i = 0; i < largePartitionSize; i++) {
+ assert.commandWorked(coll.insert({_id: i + smallPartitionSize, val: i, partition: 2}));
+ }
+ // Run an aggregation that will keep all documents in the cache for all documents.
+ resetProfiler(db);
+ let results =
+ coll.aggregate(
+ [
+ {
+ $setWindowFields: {
+ partitionBy: "$partition",
+ sortBy: {partition: 1},
+ output: {
+ sum: {
+ $sum: "$val",
+ window: {documents: [-largePartitionSize, largePartitionSize]}
+ }
}
}
- }
- },
- {$sort: {_id: 1}}
- ],
- {allowDiskUse: true})
- .toArray();
-for (let i = 0; i < results.length; i++) {
- if (results[i].partition === 1) {
- assert.eq(results[i].sum, 15, "Unexepected result in first partition at position " + i);
- } else {
- assert.eq(results[i].sum, 210, "Unexepcted result in second partition at position " + i);
+ },
+ {$sort: {_id: 1}}
+ ],
+ {allowDiskUse: true})
+ .toArray();
+ for (let i = 0; i < results.length; i++) {
+ if (results[i].partition === 1) {
+ assert.eq(results[i].sum, 15, "Unexpected result in first partition at position " + i);
+ } else {
+ assert.eq(
+ results[i].sum, 210, "Unexpected result in second partition at position " + i);
+ }
+ }
+ checkProfilerForDiskWrite(db, "$setWindowFields");
+
+ // Run an aggregation that will store too many documents in the function and force a spill.
+ // Spill to disk after 10 documents.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 10});
+ resetProfiler(db);
+ results = coll.aggregate(
+ [
+ {
+ $setWindowFields: {
+ partitionBy: "$partition",
+ sortBy: {partition: 1},
+ output: {arr: {$push: "$val", window: {documents: [-25, 25]}}}
+ }
+ },
+ {$sort: {_id: 1}}
+ ],
+ {allowDiskUse: true})
+ .toArray();
+ checkProfilerForDiskWrite(db, "$setWindowFields");
+ for (let i = 0; i < results.length; i++) {
+ if (results[i].partition === 1) {
+ assert(arrayEq(results[i].arr, [0, 1, 2, 3, 4, 5]),
+ "Unexpected result in first partition at position " + i);
+ } else {
+ assert(
+ arrayEq(results[i].arr,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
+ "Unexpected result in second partition at position " + i);
+ }
}
+
+ // Turn off the failpoint for future tests.
+ changeSpillLimit({mode: 'off', maxDocs: null});
}
-checkProfilerForDiskWrite(db, "$setWindowFields");
-// We don't execute setWindowFields in a sharded explain.
-if (!FixtureHelpers.isMongos(db)) {
- // Test that an explain that executes the query reports usedDisk correctly.
+// Assert that 'usedDisk' is correctly set in an explain query.
+function testUsedDiskAppearsInExplain() {
+    // Don't drop the collection, since the setup from testSpillWithDifferentPartitions() is still valid.
+
+ // Spill after 10 documents. This number should be bigger than the window size.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 10});
+
+ // Run an explain query where 'usedDisk' should be true.
let explainPipeline = [
{
$setWindowFields: {
@@ -178,126 +200,65 @@ if (!FixtureHelpers.isMongos(db)) {
coll.explain("allPlansExecution").aggregate(explainPipeline, {allowDiskUse: true}),
"$_internalSetWindowFields");
assert(stages[0]["usedDisk"], stages);
- setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- avgDocSize * largePartitionSize * 2);
- explainPipeline = [
- {
- $setWindowFields: {
- partitionBy: "$partition",
- sortBy: {partition: 1},
- output: {arr: {$sum: "$val", window: {documents: [0, 0]}}}
- }
- },
- {$sort: {_id: 1}}
- ];
+ // Run an explain query with the default memory limit, so 'usedDisk' should be false.
+ changeSpillLimit({mode: 'off', maxDocs: null});
stages = getAggPlanStages(
coll.explain("allPlansExecution").aggregate(explainPipeline, {allowDiskUse: true}),
"$_internalSetWindowFields");
assert(!stages[0]["usedDisk"], stages);
}
-// Run an aggregation that will store too many documents in the function and force a spill. Set the
-// memory limit to be over the size of the large partition.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- largePartitionSize * avgDocSize + 1);
-resetProfiler(db);
-results = coll.aggregate(
- [
- {
- $setWindowFields: {
- partitionBy: "$partition",
- sortBy: {partition: 1},
- output: {arr: {$push: "$val", window: {documents: [-25, 25]}}}
- }
- },
- {$sort: {_id: 1}}
- ],
- {allowDiskUse: true})
- .toArray();
-checkProfilerForDiskWrite(db, "$setWindowFields");
-for (let i = 0; i < results.length; i++) {
- if (results[i].partition === 1) {
- assert(arrayEq(results[i].arr, [0, 1, 2, 3, 4, 5]),
- "Unexepected result in first partition at position " + i);
- } else {
- assert(arrayEq(results[i].arr,
- [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]),
- "Unexepcted result in second partition at position " + i);
+// Assert that situations that would require a large spill successfully write to disk.
+function testLargeSpill() {
+ coll.drop();
+
+ let numDocs = 1111;
+ let batchArr = [];
+ for (let docNum = 0; docNum < numDocs; docNum++) {
+ batchArr.push({_id: docNum, val: docNum, partition: 1});
}
-}
+ assert.commandWorked(coll.insert(batchArr));
+ // Spill to disk after 1000 documents.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 1000});
-// Check that if function memory limit exceeds we fail even though the partition iterator spilled.
-// $push uses about ~950 to store all the values in the second partition.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- avgDocSize * 2);
+ // Run a document window over the whole collection to keep everything in the cache.
+ resetProfiler(db);
+ const results =
+ coll.aggregate(
+ [
+ {
+ $setWindowFields: {
+ sortBy: {partition: 1},
+ output: {arr: {$sum: "$val", window: {documents: [-numDocs, numDocs]}}}
+ }
+ },
+ {$sort: {_id: 1}}
+ ],
+ {allowDiskUse: true})
+ .toArray();
+ checkProfilerForDiskWrite(db, "$setWindowFields");
+ // Check that the command succeeded.
+ assert.eq(results.length, numDocs);
+ for (let i = 0; i < numDocs; i++) {
+ assert.eq(results[i].arr, 616605, results);
+ }
-function runExceedMemoryLimitTest(spec) {
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [
- {$setWindowFields: {partitionBy: "$partition", sortBy: {partition: 1}, output: spec}},
- {$sort: {_id: 1}}
- ],
- allowDiskUse: true,
- cursor: {}
- }),
- 5414201);
+ // Turn off the failpoint for future tests.
+ changeSpillLimit({mode: 'off', maxDocs: null});
}
-runExceedMemoryLimitTest({arr: {$push: "$val", window: {documents: [-21, 21]}}});
-runExceedMemoryLimitTest({
- percentile: {
- $percentile: {p: [0.6, 0.7], input: "$price", method: "approximate"},
- window: {documents: [-21, 21]}
+// Assert that 'usedDisk' is set to true if spilling occurs inside a $lookup subpipeline.
+function testUsedDiskInLookupPipeline() {
+ coll.drop();
+ for (let i = 0; i < largePartitionSize; i++) {
+ assert.commandWorked(coll.insert({_id: i, val: i}));
}
-});
-
-coll.drop();
-// Test that situations that would require a large spill successfully write to disk.
-// Set the limit to spill after ~1000 documents since that is the batch size when we write to disk.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- 1000 * avgDocSize);
-let numDocs = 1111;
-let batchArr = [];
-for (let docNum = 0; docNum < numDocs; docNum++) {
- batchArr.push({_id: docNum, val: docNum, partition: 1});
-}
-assert.commandWorked(coll.insert(batchArr));
-// Run a document window over the whole collection to keep everything in the cache.
-resetProfiler(db);
-results =
- coll.aggregate(
- [
- {
- $setWindowFields: {
- // partitionBy: "$partition",
- sortBy: {partition: 1},
- output: {arr: {$sum: "$val", window: {documents: [-numDocs, numDocs]}}}
- }
- },
- {$sort: {_id: 1}}
- ],
- {allowDiskUse: true})
- .toArray();
-checkProfilerForDiskWrite(db, "$setWindowFields");
-// Check that the command succeeded.
-assert.eq(results.length, numDocs);
-for (let i = 0; i < numDocs; i++) {
- assert.eq(results[i].arr, 616605, results);
-}
+ // Spill to disk after 5 documents.
+ changeSpillLimit({mode: 'alwaysOn', maxDocs: 5});
-// Test that usedDisk true is set when spilling occurs inside $lookup subpipline.
-// Lower the memory limit to ensure spilling occurs.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- 500);
-resetProfiler(db);
-coll.aggregate(
+ resetProfiler(db);
+ coll.aggregate(
[
{
$lookup: {
@@ -313,12 +274,70 @@ coll.aggregate(
}],
{ allowDiskUse: true, cursor: {} })
.toArray();
-checkProfilerForDiskWrite(db, "$lookup");
+ checkProfilerForDiskWrite(db, "$lookup");
+
+ // Turn off the failpoint for future tests.
+ changeSpillLimit({mode: 'off', maxDocs: null});
+}
+
+function runSingleErrorTest({spec, errorCode, diskUse}) {
+ assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [
+ {$setWindowFields: {partitionBy: "$partition", sortBy: {partition: 1}, output: spec}},
+ {$sort: {_id: 1}}
+ ],
+ allowDiskUse: diskUse,
+ cursor: {}
+ }),
+ errorCode);
+}
+
+// Assert that an error is raised when the pipeline exceeds the memory limit or disk use is not
+// allowed.
+function testErrorsWhenCantSpill() {
+    // Don't drop the collection, since the setup from testUsedDiskInLookupPipeline() is still valid.
+
+ const origParamValue = assert.commandWorked(db.adminCommand({
+ getParameter: 1,
+ internalDocumentSourceSetWindowFieldsMaxMemoryBytes: 1
+ }))["internalDocumentSourceSetWindowFieldsMaxMemoryBytes"];
+    // Decrease the maximum memory limit allowed. $push uses roughly 950 bytes to store all the
+    // values in the second partition.
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
+ "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
+ avgDocSize * 2);
+
+ // Assert the pipeline errors when exceeding maximum memory, even though the data spilled.
+ runSingleErrorTest({
+ spec: {arr: {$push: "$val", window: {documents: [-21, 21]}}},
+ errorCode: 5414201,
+ diskUse: true
+ });
+ // Assert the pipeline errors when exceeding the maximum memory, even though the data spilled.
+ let percentileSpec = {
+ $percentile: {p: [0.6, 0.7], input: "$price", method: "approximate"},
+ window: {documents: [-21, 21]}
+ };
+ runSingleErrorTest({spec: {percentile: percentileSpec}, errorCode: 5414201, diskUse: true});
+ // Assert the pipeline fails when trying to spill, but 'allowDiskUse' is set to false.
+ runSingleErrorTest({spec: {percentile: percentileSpec}, errorCode: 5643011, diskUse: false});
+ // Reset the memory limit for other tests.
+ setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
+ "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
+ origParamValue);
+}
+
+// Run the tests.
+testSpillWithDifferentAccumulators();
+testSpillWithDifferentPartitions();
+// We don't execute setWindowFields in a sharded explain.
+if (!FixtureHelpers.isMongos(db)) {
+ testUsedDiskAppearsInExplain();
+}
+testLargeSpill();
+testUsedDiskInLookupPipeline();
+testErrorsWhenCantSpill();
-// Reset limit for other tests.
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
- "internalDocumentSourceSetWindowFieldsMaxMemoryBytes",
- origParamValue);
// Reset profiler.
-FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 0}});
-})();
+FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 0}});
\ No newline at end of file
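The rewritten spill tests no longer tune internalDocumentSourceSetWindowFieldsMaxMemoryBytes for each case; instead changeSpillLimit() toggles the overrideMemoryLimitForSpill failpoint on every primary. A minimal sketch of the underlying command pair, assuming a direct connection where 'admin' is the admin database handle:

// Force $setWindowFields to spill once a partition holds more than 5 documents.
assert.commandWorked(admin.runCommand({
    configureFailPoint: 'overrideMemoryLimitForSpill',
    mode: 'alwaysOn',
    data: {maxDocsBeforeSpill: 5}
}));
// ... run the window-function pipeline with {allowDiskUse: true} ...
// Restore the default behavior afterwards.
assert.commandWorked(
    admin.runCommand({configureFailPoint: 'overrideMemoryLimitForSpill', mode: 'off'}));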
diff --git a/jstests/aggregation/sources/setWindowFields/stddev.js b/jstests/aggregation/sources/setWindowFields/stddev.js
index caa76b4926804..7b3612afe4bb3 100644
--- a/jstests/aggregation/sources/setWindowFields/stddev.js
+++ b/jstests/aggregation/sources/setWindowFields/stddev.js
@@ -1,10 +1,10 @@
/**
* Test that standard deviation works as a window function.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
const coll = db[jsTestName()];
coll.drop();
@@ -38,5 +38,4 @@ let results =
for (let index = 0; index < results.length; index++) {
assert.eq(null, results[index].stdDevPop);
assert.eq(null, results[index].stdDevSamp);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/sum.js b/jstests/aggregation/sources/setWindowFields/sum.js
index d8267a84af086..064200fec62ed 100644
--- a/jstests/aggregation/sources/setWindowFields/sum.js
+++ b/jstests/aggregation/sources/setWindowFields/sum.js
@@ -1,10 +1,11 @@
/**
* Test that $sum works as a window function.
*/
-(function() {
-"use strict";
+import {
+ seedWithTickerData,
+ testAccumAgainstGroup
+} from "jstests/aggregation/extras/window_function_helpers.js";
-load("jstests/aggregation/extras/window_function_helpers.js");
load("jstests/aggregation/extras/utils.js"); // documentEq
const coll = db[jsTestName()];
@@ -276,5 +277,4 @@ verifyResults(result, function(num, baseObj) {
baseObj.mixedTypeSum += (i % 2) ? 0 : i;
}
return baseObj;
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/time.js b/jstests/aggregation/sources/setWindowFields/time.js
index e4f7d7cfa6c62..1e07bfebdc794 100644
--- a/jstests/aggregation/sources/setWindowFields/time.js
+++ b/jstests/aggregation/sources/setWindowFields/time.js
@@ -1,11 +1,6 @@
/**
* Test time-based window bounds.
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/window_function_helpers.js");
-
const coll = db.setWindowFields_time;
coll.drop();
@@ -195,5 +190,4 @@ assert.commandWorked(coll.insert([
error = assert.throws(() => {
run([range('unbounded', 'unbounded')]);
});
-assert.commandFailedWithCode(error, 5429513);
-})();
+assert.commandFailedWithCode(error, 5429513);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js b/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js
index dcfdb0ec6b410..58ac3db6d5b0d 100644
--- a/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js
+++ b/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js
@@ -11,11 +11,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.window_functions_on_timeseries_coll;
@@ -327,5 +324,4 @@ assertExplainBehaviorAndCorrectResults(
{_id: 1, rank: 1},
{_id: 5, rank: 2},
{_id: 3, rank: 3},
- ]);
-})();
+ ]);
\ No newline at end of file
diff --git a/jstests/aggregation/sources/sort/explain_sort.js b/jstests/aggregation/sources/sort/explain_sort.js
index 7d76527e1e6ec..f196683b3c187 100644
--- a/jstests/aggregation/sources/sort/explain_sort.js
+++ b/jstests/aggregation/sources/sort/explain_sort.js
@@ -5,10 +5,7 @@
// # Asserts on the number of documents examined in an explain plan.
// assumes_no_implicit_index_creation
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
+import {getAggPlanStages, isQueryPlan} from "jstests/libs/analyze_plan.js";
const coll = db.explain_sort;
coll.drop();
@@ -69,5 +66,4 @@ for (let verbosity of ["queryPlanner", "executionStats", "allPlansExecution"]) {
pipeline = [{$project: {_id: 1}}, {$limit: 5}];
checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity, optimizeDisabled ? 10 : 5);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/sources/unionWith/unionWith_explain.js b/jstests/aggregation/sources/unionWith/unionWith_explain.js
index a1db4d975e74c..e1ca53d7d5b42 100644
--- a/jstests/aggregation/sources/unionWith/unionWith_explain.js
+++ b/jstests/aggregation/sources/unionWith/unionWith_explain.js
@@ -6,12 +6,9 @@
* ]
*/
-(function() {
-"use strict";
load("jstests/aggregation/extras/utils.js"); // arrayEq, documentEq
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const testDB = db.getSiblingDB(jsTestName());
const collA = testDB.A;
@@ -114,8 +111,8 @@ function assertExplainEq(union, regular) {
} else {
assert(false,
"Don't know how to compare following explains.\n" +
- "regular: " + tojson(regularExplain) + "\n" +
- "union: " + tojson(unionSubExplain) + "\n");
+ "regular: " + tojson(regular) + "\n" +
+ "union: " + tojson(union) + "\n");
}
}
@@ -259,5 +256,4 @@ if (!res["failpoint.disablePipelineOptimization"].mode) {
.aggregate([{$unionWith: indexedColl.getName()}, {$match: {val: {$gt: 2}}}]);
expectedResult = indexedColl.explain("executionStats").aggregate([{$match: {val: {$gt: 2}}}]);
assertExplainMatch(result, expectedResult);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/aggregation/spill_to_disk.js b/jstests/aggregation/spill_to_disk.js
index 8d37777c75b12..445e097ad7ab7 100644
--- a/jstests/aggregation/spill_to_disk.js
+++ b/jstests/aggregation/spill_to_disk.js
@@ -12,14 +12,11 @@
// requires_pipeline_optimization,
// requires_persistence,
// ]
-(function() {
-'use strict';
-
load("jstests/aggregation/extras/utils.js");
load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
-load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.spill_to_disk;
coll.drop();
@@ -564,4 +561,3 @@ const oldMemSettings =
setHashLookupParameters(oldMemSettings);
}
})();
-})();
diff --git a/jstests/aggregation/split_match_and_swap_with_sort.js b/jstests/aggregation/split_match_and_swap_with_sort.js
index 53b5bcf97d427..ecc382adda71d 100644
--- a/jstests/aggregation/split_match_and_swap_with_sort.js
+++ b/jstests/aggregation/split_match_and_swap_with_sort.js
@@ -11,10 +11,7 @@
// # Don't disable the thing we are specifically testing for!
// requires_pipeline_optimization,
// ]
-load('jstests/libs/analyze_plan.js');
-
-(function() {
-"use strict";
+import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.getSiblingDB("split_match_and_swap_with_sort")["test"];
coll.drop();
@@ -66,4 +63,3 @@ assert.commandWorked(
collScanStage.filter,
collScanStage);
}
-}());
\ No newline at end of file
diff --git a/jstests/aggregation/unwind.js b/jstests/aggregation/unwind.js
index ffd2a3da9c68c..7db316de1e1bc 100644
--- a/jstests/aggregation/unwind.js
+++ b/jstests/aggregation/unwind.js
@@ -1,6 +1,6 @@
// SERVER-8088: test $unwind with a scalar
-t = db.agg_unwind;
+let t = db.agg_unwind;
t.drop();
t.insert({_id: 1});
diff --git a/jstests/aggregation/use_query_project_and_sort.js b/jstests/aggregation/use_query_project_and_sort.js
index dbe5b6a9f6f92..f65e6ce61efbe 100644
--- a/jstests/aggregation/use_query_project_and_sort.js
+++ b/jstests/aggregation/use_query_project_and_sort.js
@@ -7,10 +7,7 @@
// @tags: [
// do_not_wrap_aggregations_in_facets,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+import {hasRejectedPlans, isQueryPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.use_query_project_and_sort;
coll.drop();
@@ -58,5 +55,4 @@ assertQueryCoversProjectionAndSort(
assertQueryCoversProjectionAndSort(
[{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 0, a: 1, x: 1}}]);
assertQueryCoversProjectionAndSort(
- [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1, a: 1}}]);
-}());
+ [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1, a: 1}}]);
\ No newline at end of file
diff --git a/jstests/aggregation/use_query_projection.js b/jstests/aggregation/use_query_projection.js
index 1f92b30d7b346..15cc420c85bc5 100644
--- a/jstests/aggregation/use_query_projection.js
+++ b/jstests/aggregation/use_query_projection.js
@@ -7,10 +7,13 @@
// @tags: [
// do_not_wrap_aggregations_in_facets,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+import {
+ aggPlanHasStage,
+ hasRejectedPlans,
+ isAggregationPlan,
+ isQueryPlan,
+ planHasStage,
+} from "jstests/libs/analyze_plan.js";
const coll = db.use_query_projection;
coll.drop();
@@ -91,5 +94,4 @@ assert.commandWorked(coll.insert({x: ["an", "array!"]}));
assertQueryDoesNotCoverProjection(
{pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}]});
assertQueryDoesNotCoverProjection(
- {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
-}());
+ {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]});
\ No newline at end of file
diff --git a/jstests/aggregation/use_query_sort.js b/jstests/aggregation/use_query_sort.js
index ca8c6f3bd7703..4c601f49f3573 100644
--- a/jstests/aggregation/use_query_sort.js
+++ b/jstests/aggregation/use_query_sort.js
@@ -6,10 +6,13 @@
// @tags: [
// do_not_wrap_aggregations_in_facets,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers.
+import {
+ aggPlanHasStage,
+ hasRejectedPlans,
+ isAggregationPlan,
+ isQueryPlan,
+ planHasStage,
+} from "jstests/libs/analyze_plan.js";
const coll = db.use_query_sort;
coll.drop();
@@ -86,5 +89,4 @@ assertHasBlockingQuerySort(
// Verify that meta-sort on "randVal" can be pushed into the query layer. Although "randVal" $meta
// sort is currently a supported way to randomize the order of the data, it shouldn't preclude
// pushdown of the sort into the plan stage layer.
-assertHasBlockingQuerySort([{$sort: {key: {$meta: "randVal"}}}], false);
-}());
+assertHasBlockingQuerySort([{$sort: {key: {$meta: "randVal"}}}], false);
\ No newline at end of file
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 65e12f0442561..e784fcd1e0bab 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -5,6 +5,7 @@
// Multiple users cannot be authenticated on one connection within a session.
TestData.disableImplicitSessions = true;
+let baseName;
function setupTest() {
print("START auth1.js");
baseName = "jstests_auth_auth1";
@@ -16,14 +17,14 @@ function setupTest() {
function runTest(m) {
// these are used by read-only user
db = m.getDB("test");
- mro = new Mongo(m.host);
- dbRO = mro.getDB("test");
- tRO = dbRO[baseName];
+ let mro = new Mongo(m.host);
+ let dbRO = mro.getDB("test");
+ let tRO = dbRO[baseName];
db.getSiblingDB("admin").createUser({user: "root", pwd: "root", roles: ["root"]});
db.getSiblingDB("admin").auth("root", "root");
- t = db[baseName];
+ let t = db[baseName];
t.drop();
db.dropAllUsers();
@@ -51,7 +52,7 @@ function runTest(m) {
assert(!db.auth("eliot", "eliot"), "auth succeeded with wrong password");
assert(db.auth("eliot", "eliot2"), "auth failed");
- for (i = 0; i < 1000; ++i) {
+ for (let i = 0; i < 1000; ++i) {
t.save({i: i});
}
assert.eq(1000, t.count(), "A1");
diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js
index baf57753cef0d..077acf07c0f2c 100644
--- a/jstests/auth/auth2.js
+++ b/jstests/auth/auth2.js
@@ -1,6 +1,6 @@
// test read/write permissions
-m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
+let m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
db = m.getDB("admin");
// These statements throw because the localhost exception does not allow
@@ -26,7 +26,7 @@ assert.throws(function() {
db.auth("eliot", "eliot");
-users = db.getCollection("system.users");
+let users = db.getCollection("system.users");
assert.eq(1, users.count());
db.shutdownServer();
diff --git a/jstests/auth/authn_session_abandoned.js b/jstests/auth/authn_session_abandoned.js
new file mode 100644
index 0000000000000..d5298ecbe02eb
--- /dev/null
+++ b/jstests/auth/authn_session_abandoned.js
@@ -0,0 +1,31 @@
+// Test that an authentication session abandoned before completion is logged as AuthenticationAbandoned.
+
+(function() {
+'use strict';
+load('jstests/libs/parallel_shell_helpers.js');
+
+const kFailedToAuthMsgId = 5286307;
+
+const mongod = MongoRunner.runMongod();
+
+try {
+ mongod.getDB("admin").createUser(
+ {"user": "admin", "pwd": "pwd", roles: ['root'], mechanisms: ["SCRAM-SHA-256"]});
+
+ const shellCmd = () => {
+ // base64 encoded: 'n,,n=admin,r=deadbeefcafeba11';
+ const kClientPayload = 'biwsbj1hZG1pbixyPWRlYWRiZWVmY2FmZWJhMTE=';
+
+ db.getSiblingDB("admin").runCommand(
+ {saslStart: 1, mechanism: "SCRAM-SHA-256", payload: kClientPayload});
+ };
+
+ startParallelShell(shellCmd, mongod.port)();
+
+ assert.soon(() => checkLog.checkContainsOnceJson(
+ mongod, kFailedToAuthMsgId, {"result": ErrorCodes.AuthenticationAbandoned}));
+
+} finally {
+ MongoRunner.stopMongod(mongod);
+}
+})();
diff --git a/jstests/auth/bulk_write_mongod.js b/jstests/auth/bulk_write_mongod.js
index 987a48ff47b3d..5a072a5bda51f 100644
--- a/jstests/auth/bulk_write_mongod.js
+++ b/jstests/auth/bulk_write_mongod.js
@@ -1,12 +1,8 @@
/*
* Auth test for the bulkWrite command on mongods.
*/
-(function() {
-'use strict';
-
-load("jstests/auth/lib/bulk_write_base.js");
+import {runTest} from "jstests/auth/lib/bulk_write_base.js";
const mongod = MongoRunner.runMongod({auth: ""});
runTest(mongod);
MongoRunner.stopMongod(mongod);
-})();
diff --git a/jstests/auth/change_stream_change_collection_role_auth.js b/jstests/auth/change_stream_change_collection_role_auth.js
index 4d174543d66e0..4ed7464cdc898 100644
--- a/jstests/auth/change_stream_change_collection_role_auth.js
+++ b/jstests/auth/change_stream_change_collection_role_auth.js
@@ -6,7 +6,8 @@
* assumes_read_preference_unchanged,
* requires_replication,
* requires_fcv_62,
- * __TEMPORARILY_DISABLED__
+ * # TODO SERVER-74811: Re-enable this test.
+ * __TEMPORARILY_DISABLED__,
* ]
*/
(function() {
diff --git a/jstests/auth/change_stream_pre_image_coll_role_auth.js b/jstests/auth/change_stream_pre_image_coll_role_auth.js
index fcbeec157b624..a32eb716614fe 100644
--- a/jstests/auth/change_stream_pre_image_coll_role_auth.js
+++ b/jstests/auth/change_stream_pre_image_coll_role_auth.js
@@ -199,4 +199,4 @@ assertActionAuthorized(removePreImage.bind(null, rootPrimary), true);
rootPrimary.logout();
replSetTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/check_metadata_consistency.js b/jstests/auth/check_metadata_consistency.js
deleted file mode 100644
index 84dd5f89e88f2..0000000000000
--- a/jstests/auth/check_metadata_consistency.js
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Tests to validate the privileges of checkMetadataConsistency command.
- *
- * @tags: [
- * featureFlagCheckMetadataConsistency,
- * requires_fcv_70,
- * # TODO SERVER-74445: Remove tag once the command will be compatible with catalog shard
- * temporary_catalog_shard_incompatible,
- * ]
- */
-
-(function() {
-"use strict";
-
-const kClusterLevel = "clusterLevel";
-const kDatabaseLevel = "databaseLevel";
-const kCollectionLevel = "collectionLevel";
-
-// Helper function to assert that the checkMetadataConsistency command succeeds
-function assertAuthCommandWorked(adminDb, conn, user, level) {
- assert(adminDb.logout());
- assert(adminDb.auth(user, "pwd"));
- const cmd = () => {
- if (level === kClusterLevel || level === kDatabaseLevel) {
- return conn.checkMetadataConsistency().toArray();
- } else {
- return conn.coll.checkMetadataConsistency().toArray();
- }
- };
- const inconsistencies = cmd();
- assert.eq(1, inconsistencies.length);
- assert.eq("MisplacedCollection", inconsistencies[0].type);
-}
-
-// Helper function to assert that the checkMetadataConsistency command fails
-function assertAuthCommandFailed(adminDb, conn, user, level) {
- assert(adminDb.logout());
- assert(adminDb.auth(user, "pwd"));
-
- const cmd = () => {
- if (level === kClusterLevel || level === kDatabaseLevel) {
- return conn.runCommand({checkMetadataConsistency: 1});
- } else {
- return conn.runCommand({checkMetadataConsistency: "coll"});
- }
- };
-
- assert.commandFailedWithCode(
- cmd(),
- ErrorCodes.Unauthorized,
- "user should no longer have privileges to execute checkMetadataConsistency command.");
-}
-
-// Configure initial sharding cluster
-const st = new ShardingTest({keyFile: "jstests/libs/key1", useHostname: false});
-
-const shardAdmin = st.shard0.getDB("admin");
-shardAdmin.createUser({user: "admin", pwd: "x", roles: ["root"]});
-shardAdmin.auth("admin", "x");
-
-const adminDb = st.s.getDB("admin");
-adminDb.createUser({user: "admin", pwd: "x", roles: ["root"]});
-adminDb.auth("admin", "x");
-
-const dbName = "testCheckMetadataConsistencyDB";
-const db = st.s.getDB(dbName);
-
-// Insert a hidden unsharded collection inconsistency.
-assert.commandWorked(
- adminDb.adminCommand({enableSharding: dbName, primaryShard: st.shard1.shardName}));
-assert.commandWorked(
- st.shard0.getDB(dbName).runCommand({insert: "coll", documents: [{_id: "foo"}]}));
-
-(function createRolesToTest() {
- assert.commandWorked(adminDb.runCommand({
- createRole: "clusterLevelRole",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createRole: "databaseLevelRole",
- roles: [],
- privileges:
- [{resource: {db: dbName, collection: ""}, actions: ["checkMetadataConsistency"]}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createRole: "collectionLevelRole",
- roles: [],
- privileges:
- [{resource: {db: dbName, collection: "coll"}, actions: ["checkMetadataConsistency"]}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createRole: "roleWithAllNonSystemCollectionsPrivileges",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createRole: "roleWithNotRelatedAction",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}]
- }));
-})();
-
-(function createUsersToTest() {
- assert.commandWorked(adminDb.runCommand({
- createUser: "clusterManagerUser",
- pwd: "pwd",
- roles: [{role: "clusterManager", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "clusterAdminUser",
- pwd: "pwd",
- roles: [{role: "clusterAdmin", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithClusterLevelRole",
- pwd: "pwd",
- roles: [{role: "clusterLevelRole", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithDatabaseLevelRole",
- pwd: "pwd",
- roles: [{role: "databaseLevelRole", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithCollectionLevelRole",
- pwd: "pwd",
- roles: [{role: "collectionLevelRole", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithAllNonSystemCollectionsPrivileges",
- pwd: "pwd",
- roles: [{role: "roleWithAllNonSystemCollectionsPrivileges", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithUnrelatedRole",
- pwd: "pwd",
- roles: [{role: "hostManager", db: "admin"}]
- }));
-
- assert.commandWorked(adminDb.runCommand({
- createUser: "userWithUnrelatedAction",
- pwd: "pwd",
- roles: [{role: "roleWithNotRelatedAction", db: "admin"}]
- }));
-
- assert.commandWorked(
- adminDb.runCommand({createUser: "userWithNoRoles", pwd: "pwd", roles: []}));
-})();
-
-shardAdmin.logout();
-adminDb.logout();
-
-(function testClusterLevelModePrivileges() {
- assertAuthCommandWorked(adminDb, adminDb, "clusterManagerUser", kClusterLevel);
- assertAuthCommandWorked(adminDb, adminDb, "clusterAdminUser", kClusterLevel);
- assertAuthCommandWorked(adminDb, adminDb, "userWithClusterLevelRole", kClusterLevel);
-
- assertAuthCommandFailed(
- adminDb, adminDb, "userWithAllNonSystemCollectionsPrivileges", kClusterLevel);
- assertAuthCommandFailed(adminDb, adminDb, "userWithDatabaseLevelRole", kClusterLevel);
- assertAuthCommandFailed(adminDb, adminDb, "userWithCollectionLevelRole", kClusterLevel);
- assertAuthCommandFailed(adminDb, adminDb, "userWithUnrelatedAction", kClusterLevel);
- assertAuthCommandFailed(adminDb, adminDb, "userWithUnrelatedRole", kClusterLevel);
- assertAuthCommandFailed(adminDb, adminDb, "userWithNoRoles", kClusterLevel);
-})();
-
-(function testDatabaseLevelModePrivileges() {
- assertAuthCommandWorked(adminDb, db, "clusterManagerUser", kDatabaseLevel);
- assertAuthCommandWorked(adminDb, db, "clusterAdminUser", kDatabaseLevel);
- assertAuthCommandWorked(adminDb, db, "userWithClusterLevelRole", kDatabaseLevel);
- assertAuthCommandWorked(adminDb, db, "userWithDatabaseLevelRole", kDatabaseLevel);
- assertAuthCommandWorked(
- adminDb, db, "userWithAllNonSystemCollectionsPrivileges", kDatabaseLevel);
-
- assertAuthCommandFailed(adminDb, db, "userWithCollectionLevelRole", kDatabaseLevel);
- assertAuthCommandFailed(adminDb, db, "userWithUnrelatedAction", kDatabaseLevel);
- assertAuthCommandFailed(adminDb, db, "userWithUnrelatedRole", kDatabaseLevel);
- assertAuthCommandFailed(adminDb, db, "userWithNoRoles", kDatabaseLevel);
-})();
-
-(function testCollectionLevelModePrivileges() {
- assertAuthCommandWorked(adminDb, db, "clusterManagerUser", kCollectionLevel);
- assertAuthCommandWorked(adminDb, db, "clusterAdminUser", kCollectionLevel);
- assertAuthCommandWorked(adminDb, db, "userWithClusterLevelRole", kCollectionLevel);
- assertAuthCommandWorked(adminDb, db, "userWithDatabaseLevelRole", kCollectionLevel);
- assertAuthCommandWorked(adminDb, db, "userWithCollectionLevelRole", kCollectionLevel);
- assertAuthCommandWorked(
- adminDb, db, "userWithAllNonSystemCollectionsPrivileges", kCollectionLevel);
-
- assertAuthCommandFailed(adminDb, db, "userWithUnrelatedAction", kCollectionLevel);
- assertAuthCommandFailed(adminDb, db, "userWithUnrelatedRole", kCollectionLevel);
- assertAuthCommandFailed(adminDb, db, "userWithNoRoles", kCollectionLevel);
-})();
-
-st.stop();
-})();
diff --git a/jstests/auth/currentop_cursors_auth.js b/jstests/auth/currentop_cursors_auth.js
index b51325de61a76..d0fc30f236ff1 100644
--- a/jstests/auth/currentop_cursors_auth.js
+++ b/jstests/auth/currentop_cursors_auth.js
@@ -21,16 +21,22 @@ Random.setRandomSeed();
const pass = "a" + Random.rand();
// Create one root user and one regular user on the given connection.
-function createUsers(conn) {
+function createUsers(conn, grantDirectShardOperationsRole) {
const adminDB = conn.getDB("admin");
+
adminDB.createUser({user: "ted", pwd: pass, roles: ["root"]});
assert(adminDB.auth("ted", pass), "Authentication 1 Failed");
- adminDB.createUser({user: "yuta", pwd: pass, roles: ["readWriteAnyDatabase"]});
+
+ let yutaRoles = ["readWriteAnyDatabase"];
+ if (grantDirectShardOperationsRole)
+ yutaRoles.push("directShardOperations");
+
+ adminDB.createUser({user: "yuta", pwd: pass, roles: yutaRoles});
}
// Create the necessary users at both cluster and shard-local level.
-createUsers(shardConn);
-createUsers(mongosConn);
+createUsers(shardConn, /* grantDirectShardOperationsRole */ true);
+createUsers(mongosConn, /* grantDirectShardOperationsRole */ false);
// Run the various auth tests on the given shard or mongoS connection.
function runCursorTests(conn) {
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index d7588d73dc648..8480568cd4c1f 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -8,9 +8,6 @@ TestData.disableImplicitSessions = true;
function runTest(conn) {
const adminDB = conn.getDB("admin");
- const hello = adminDB.runCommand("hello");
- assert.commandWorked(hello);
- const isMongos = (hello.msg === "isdbgrid");
// Create the admin user.
assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
diff --git a/jstests/auth/iteration_count_control.js b/jstests/auth/iteration_count_control.js
index d003347bdbc2d..0111ecf3edd98 100644
--- a/jstests/auth/iteration_count_control.js
+++ b/jstests/auth/iteration_count_control.js
@@ -3,7 +3,7 @@
(function() {
'use strict';
-load('./jstests/multiVersion/libs/auth_helpers.js');
+load('jstests/multiVersion/libs/auth_helpers.js');
const conn = MongoRunner.runMongod({auth: ''});
const adminDB = conn.getDB('admin');
diff --git a/jstests/auth/killop_own_ops.js b/jstests/auth/killop_own_ops.js
index ae1058bca4641..d31d4629cdc04 100644
--- a/jstests/auth/killop_own_ops.js
+++ b/jstests/auth/killop_own_ops.js
@@ -4,7 +4,8 @@
* Theory of operation: Create a long running operation from a user which does not have the killOp
* or inProg privileges. Using the same user, run currentOp to get the opId, and then run killOp
* against it.
- * @tags: [requires_sharding]
+ * TODO SERVER-78101: Investigate the test failure and re-enable the test with CQF enabled.
+ * @tags: [requires_sharding, cqf_incompatible]
*/
(function() {
diff --git a/jstests/auth/lib/automated_idp_authn_simulator.py b/jstests/auth/lib/automated_idp_authn_simulator.py
deleted file mode 100644
index dc34d294bb286..0000000000000
--- a/jstests/auth/lib/automated_idp_authn_simulator.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/env python3
-"""
-Simulates a human authenticating to an identity provider on the Web, specifically with the
-device authorization grant flow.
-
-Given a device authorization endpoint, a username, and a file with necessary setup information, it
-will simulate automatically logging in as a human would.
-
-"""
-import argparse
-import os
-import json
-
-import geckodriver_autoinstaller
-from pathlib import Path
-from selenium import webdriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.firefox.options import Options
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-
-def authenticate_okta(activation_endpoint, username, test_credentials):
- # Install GeckoDriver if needed.
- geckodriver_autoinstaller.install()
-
- # Launch headless Firefox to the device authorization endpoint.
- firefox_options = Options()
- firefox_options.add_argument('-headless')
- driver = webdriver.Firefox(options=firefox_options)
- driver.get(activation_endpoint)
-
- try:
- # User code will be pre-populated, so wait for next button to load and click.
- next_button = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']"))
- )
- next_button.click()
-
- # Wait for the username prompt and next button to load.
- username_input_box = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//input[@name='username']"))
- )
- next_button = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']"))
- )
-
- # Enter username.
- username_input_box.send_keys(username)
- next_button.click()
-
- # Wait for the password prompt and next button to load.
- password_input_box = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//input[@name='password']"))
- )
- verify_button = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Sign In']"))
- )
-
- # Enter password.
- password_input_box.send_keys(test_credentials[username])
- verify_button.click()
-
- # Assert that the landing page contains the "Device activated" text, indicating successful auth.
- landing_header = WebDriverWait(driver, 30).until(
- EC.presence_of_element_located((By.XPATH, "//h2[@class='okta-form-title o-form-head'][contains(text(), 'Device activated')]"))
- )
- assert landing_header is not None
-
- except Exception as e:
- print(e)
- else:
- print('Success')
- finally:
- driver.quit()
-
-def main():
- parser = argparse.ArgumentParser(description='Okta Automated Authentication Simulator')
-
- parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at with code filled in")
- parser.add_argument('-u', '--username', type=str, help="Username to authenticate as")
- parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory")
-
- args = parser.parse_args()
-
- with open(Path.home() / args.setupFile) as setup_file:
- setup_information = json.load(setup_file)
- assert args.username in setup_information
-
- authenticate_okta(args.activationEndpoint, args.username, setup_information)
-
-if __name__ == '__main__':
- main()
diff --git a/jstests/auth/lib/automated_idp_authn_simulator_azure.py b/jstests/auth/lib/automated_idp_authn_simulator_azure.py
new file mode 100644
index 0000000000000..f931981a3ae68
--- /dev/null
+++ b/jstests/auth/lib/automated_idp_authn_simulator_azure.py
@@ -0,0 +1,106 @@
+#! /usr/bin/env python3
+"""
+Simulates a human authenticating to Azure on the Web, specifically with the
+device authorization grant flow.
+
+Given a device authorization endpoint, a username, a user code, and a file with the necessary setup information, it
+will simulate automatically logging in as a human would.
+
+"""
+import argparse
+import os
+import json
+
+import geckodriver_autoinstaller
+from pathlib import Path
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+def authenticate_azure(activation_endpoint, userCode, username, test_credentials):
+ # Install GeckoDriver if needed.
+ geckodriver_autoinstaller.install()
+
+ # Launch headless Firefox to the device authorization endpoint.
+ firefox_options = Options()
+ firefox_options.add_argument('-headless')
+ driver = webdriver.Firefox(options=firefox_options)
+ driver.get(activation_endpoint)
+
+ try:
+ # User code will be added to the input box.
+ # Wait for the user code prompt and next button to load.
+ user_code_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='otc']"))
+ )
+ next_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Next']"))
+ )
+
+ # Enter the user code.
+ user_code_input_box.send_keys(userCode)
+ next_button.click()
+
+ # Wait for the username prompt and next button to load.
+ username_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='loginfmt']"))
+ )
+ next_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Next']"))
+ )
+
+ # Enter username.
+ username_input_box.send_keys(username)
+ next_button.click()
+
+ # Wait for the password prompt and next button to load.
+ password_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='passwd'][@placeholder='Password']"))
+ )
+ verify_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Sign in']"))
+ )
+
+ # Enter password.
+ password_input_box.send_keys(test_credentials[username])
+ verify_button.click()
+
+ # Acknowledge the 'Are you trying to sign in to OIDC_EVG_TESTING?' confirmation prompt.
+ continue_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Continue']"))
+ )
+ continue_button.click()
+
+ # Assert that the landing page contains the "You have signed in to the OIDC_EVG_TESTING application on your device" text, indicating successful auth.
+ landing_header = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//p[@id='message'][@class='text-block-body no-margin-top']"))
+ )
+ assert landing_header is not None and "You have signed in" in landing_header.text
+
+ except Exception as e:
+ print(e)
+ else:
+ print('Success')
+ finally:
+ driver.quit()
+
+def main():
+ parser = argparse.ArgumentParser(description='Azure Automated Authentication Simulator')
+
+ parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at")
+ parser.add_argument('-c', '--userCode', type=str, help="Code to be added in the endpoint to authenticate")
+ parser.add_argument('-u', '--username', type=str, help="Username to authenticate as")
+ parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory")
+
+ args = parser.parse_args()
+
+ with open(Path.home() / args.setupFile) as setup_file:
+ setup_information = json.load(setup_file)
+ assert args.username in setup_information
+
+ authenticate_azure(args.activationEndpoint, args.userCode, args.username, setup_information)
+
+if __name__ == '__main__':
+ main()
diff --git a/jstests/auth/lib/automated_idp_authn_simulator_okta.py b/jstests/auth/lib/automated_idp_authn_simulator_okta.py
new file mode 100644
index 0000000000000..208ebe530272a
--- /dev/null
+++ b/jstests/auth/lib/automated_idp_authn_simulator_okta.py
@@ -0,0 +1,99 @@
+#! /usr/bin/env python3
+"""
+Simulates a human authenticating to Okta on the Web, specifically with the
+device authorization grant flow.
+
+Given a device authorization endpoint, a username, a user code, and a file with the necessary setup information, it
+will simulate automatically logging in as a human would.
+
+"""
+import argparse
+import os
+import json
+
+import geckodriver_autoinstaller
+from pathlib import Path
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+def authenticate_okta(activation_endpoint, userCode, username, test_credentials):
+ # Install GeckoDriver if needed.
+ geckodriver_autoinstaller.install()
+
+ # Launch headless Firefox to the device authorization endpoint.
+ firefox_options = Options()
+ firefox_options.add_argument('-headless')
+ driver = webdriver.Firefox(options=firefox_options)
+ driver.get(activation_endpoint)
+
+ try:
+ # Wait for the activation code input box and next button to load.
+ activationCode_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='userCode']"))
+ )
+ next_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']"))
+ )
+
+ # Enter user activation code.
+ activationCode_input_box.send_keys(userCode)
+ next_button.click()
+
+ # Wait for the username prompt and next button to load.
+ username_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='username']"))
+ )
+ next_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']"))
+ )
+
+ # Enter username.
+ username_input_box.send_keys(username)
+ next_button.click()
+
+ # Wait for the password prompt and next button to load.
+ password_input_box = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@name='password']"))
+ )
+ verify_button = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Sign In']"))
+ )
+
+ # Enter password.
+ password_input_box.send_keys(test_credentials[username])
+ verify_button.click()
+
+ # Assert that the landing page contains the "Device activated" text, indicating successful auth.
+ landing_header = WebDriverWait(driver, 30).until(
+ EC.presence_of_element_located((By.XPATH, "//h2[@class='okta-form-title o-form-head'][contains(text(), 'Device activated')]"))
+ )
+ assert landing_header is not None
+
+ except Exception as e:
+ print(e)
+ else:
+ print('Success')
+ finally:
+ driver.quit()
+
+def main():
+ parser = argparse.ArgumentParser(description='Okta Automated Authentication Simulator')
+
+ parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at")
+ parser.add_argument('-c', '--userCode', type=str, help="Code to be added in the endpoint to authenticate")
+ parser.add_argument('-u', '--username', type=str, help="Username to authenticate as")
+ parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory")
+
+ args = parser.parse_args()
+
+ with open(Path.home() / args.setupFile) as setup_file:
+ setup_information = json.load(setup_file)
+ assert args.username in setup_information
+
+ authenticate_okta(args.activationEndpoint, args.userCode, args.username, setup_information)
+
+if __name__ == '__main__':
+ main()
diff --git a/jstests/auth/lib/bulk_write_base.js b/jstests/auth/lib/bulk_write_base.js
index eeb851dea5e3a..fdca6bbff83a4 100644
--- a/jstests/auth/lib/bulk_write_base.js
+++ b/jstests/auth/lib/bulk_write_base.js
@@ -1,10 +1,8 @@
-'use strict';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
// Auth test the BulkWrite command.
// These test cover privilege combination scenarios that commands_lib.js format cannot.
-function runTest(mongod) {
- load("jstests/libs/feature_flag_util.js");
-
+export function runTest(mongod) {
const admin = mongod.getDB('admin');
admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles});
assert(admin.auth('admin', 'pass'));
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index de2782a5ac251..fc9cc64b359b7 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -425,29 +425,23 @@ export const authCommandsLib = {
]
},
{
- testname: 'transitionToCatalogShard',
- command: {transitionToCatalogShard: 1},
+ testname: 'transitionFromDedicatedConfigServer',
+ command: {transitionFromDedicatedConfigServer: 1},
skipUnlessSharded: true,
- skipTest: (conn) => {
- return !TestData.setParameters.featureFlagCatalogShard;
- },
testcases: [
{
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [{resource: {cluster: true}, actions: ["transitionToCatalogShard"]}]
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["transitionFromDedicatedConfigServer"]}]
},
{runOnDb: firstDbName, roles: {}},
{runOnDb: secondDbName, roles: {}}
]
},
{
- testname: "_configsvrTransitionToCatalogShard",
- command: {_configsvrTransitionToCatalogShard: 1},
+ testname: "_configsvrTransitionFromDedicatedConfigServer",
+ command: {_configsvrTransitionFromDedicatedConfigServer: 1},
skipSharded: true,
- skipTest: (conn) => {
- return !TestData.setParameters.featureFlagCatalogShard;
- },
testcases: [
{
runOnDb: adminDbName,
@@ -3074,6 +3068,43 @@ export const authCommandsLib = {
}
]
},
+ {
+ testname: "cleanupStructuredEncryptionData",
+ command: {cleanupStructuredEncryptionData: "foo", cleanupTokens : {}},
+ skipSharded: true,
+ skipUnlessReplicaSet: true,
+ setup: function(db) {
+ assert.commandWorked(db.createCollection("foo", {
+ encryptedFields: {
+ "fields": [
+ {
+ "path": "firstName",
+ "keyId": UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"),
+ "bsonType": "string",
+ "queries": {"queryType": "equality"}
+ },
+ ]
+ }
+ }));
+ },
+ teardown: function(db) {
+ assert.commandWorked(db.dropDatabase());
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: { readWrite : 1, readWriteAnyDatabase : 1, dbOwner : 1, root : 1, __system : 1 },
+ privileges:
+ [{resource: {db: firstDbName, collection: "foo"}, actions: ["cleanupStructuredEncryptionData"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: { readWriteAnyDatabase : 1, root : 1, __system : 1 },
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["cleanupStructuredEncryptionData"]}]
+ }
+ ]
+ },
{
testname: "connectionStatus",
command: {connectionStatus: 1},
@@ -3264,6 +3295,148 @@ export const authCommandsLib = {
{runOnDb: adminDbName, roles: {__system: 1}, expectFail: true},
]
},
+ {
+ testname: "checkClusterMetadataConsistency",
+ command: {checkMetadataConsistency: 1},
+ skipUnlessSharded: true,
+ setup: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").createCollection("coll"));
+ },
+ teardown: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").dropDatabase());
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1}
+ },
+ {
+ runOnDb: adminDbName,
+ privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}]
+ },
+ {
+ runOnDb: adminDbName,
+ privileges: [
+ {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]}
+ ],
+ expectAuthzFailure: true
+ },
+ {
+ runOnDb: adminDbName,
+ privileges: [{
+ resource: {db: adminDbName, collection: ""},
+ actions: ["checkMetadataConsistency"]
+ }],
+ expectAuthzFailure: true
+ },
+ {
+ runOnDb: adminDbName,
+ privileges: [{
+ resource: {db: adminDbName, collection: "coll"},
+ actions: ["checkMetadataConsistency"]
+ }],
+ expectAuthzFailure: true
+ },
+ {
+ runOnDb: adminDbName,
+ privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}],
+ expectAuthzFailure: true
+ }
+ ]
+ },
+ {
+ testname: "checkDatabaseMetadataConsistency",
+ command: {checkMetadataConsistency: 1},
+ skipUnlessSharded: true,
+ setup: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").createCollection("coll"));
+ },
+ teardown: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").dropDatabase());
+ },
+ testcases: [
+ {
+ runOnDb: "test",
+ roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1}
+ },
+ {
+ runOnDb: "test",
+ privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}]
+ },
+ {
+ runOnDb: "test",
+ privileges: [
+ {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]}
+ ]
+ },
+ {
+ runOnDb: "test",
+ privileges: [{
+ resource: {db: "test", collection: ""},
+ actions: ["checkMetadataConsistency"]
+ }]
+ },
+ {
+ runOnDb: "test",
+ privileges: [{
+ resource: {db: "test", collection: "coll"},
+ actions: ["checkMetadataConsistency"]
+ }],
+ expectAuthzFailure: true
+ },
+ {
+ runOnDb: "test",
+ privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}],
+ expectAuthzFailure: true
+ }
+ ]
+ },
+ {
+ testname: "checkCollectionMetadataConsistency",
+ command: {checkMetadataConsistency: "coll"},
+ skipUnlessSharded: true,
+ setup: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").createCollection("coll"));
+ },
+ teardown: function(db) {
+ assert.commandWorked(db.getSiblingDB("test").dropDatabase());
+ },
+ testcases: [
+ {
+ runOnDb: "test",
+ roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1}
+ },
+ {
+ runOnDb: "test",
+ privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}]
+ },
+ {
+ runOnDb: "test",
+ privileges: [
+ {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]}
+ ]
+ },
+ {
+ runOnDb: "test",
+ privileges: [{
+ resource: {db: "test", collection: ""},
+ actions: ["checkMetadataConsistency"]
+ }]
+ },
+ {
+ runOnDb: "test",
+ privileges: [{
+ resource: {db: "test", collection: "coll"},
+ actions: ["checkMetadataConsistency"]
+ }]
+ },
+ {
+ runOnDb: "test",
+ privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}],
+ expectAuthzFailure: true
+ }
+ ]
+ },
{
testname: "clusterCount",
command: {clusterCount: "x"},
@@ -4807,6 +4980,25 @@ export const authCommandsLib = {
}
]
},
+ {
+ testname: "clusterBulkWrite",
+ command: {
+ clusterBulkWrite: 1,
+ ops: [
+ {insert: 0, document: {skey: "MongoDB"}},
+ {insert: 1, document: {skey: "MongoDB"}}],
+ nsInfo: [{ns: firstDbName + ".coll"}, {ns: secondDbName + ".coll1"}],
+ },
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true,
+ },
+ ]
+ },
{
testname: "clusterDelete",
command: {clusterDelete: "foo", deletes: [{q: {}, limit: 1}]},
@@ -5478,14 +5670,14 @@ export const authCommandsLib = {
testcases: [
{
runOnDb: firstDbName,
- roles: Object.extend({restore: 1}, roles_dbAdmin),
+ roles: Object.extend({restore: 1}, roles_writeDbAdmin),
privileges:
[{resource: {db: firstDbName, collection: "foo"}, actions: ["updateSearchIndex"]}],
expectFail: true,
},
{
runOnDb: secondDbName,
- roles: Object.extend({restore: 1}, roles_dbAdminAny),
+ roles: Object.extend({restore: 1}, roles_writeDbAdminAny),
privileges:
[{resource: {db: secondDbName, collection: "foo"}, actions: ["updateSearchIndex"]}],
expectFail: true,
@@ -5522,9 +5714,39 @@ export const authCommandsLib = {
{runOnDb: secondDbName, roles: {}}
]
},
+ {
+ testname: "s_moveRange",
+ command: {moveRange: "test.x", min: {x:1}, toShard:"a"},
+ skipUnlessSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["moveChunk"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "d_moveRange",
+ command: {_shardsvrMoveRange: "test.x", fromShard: "a", toShard: "b", min: {}, max: {}, maxChunkSizeBytes: 1024},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
{
testname: "movePrimary",
- command: {movePrimary: "x"},
+ command: {movePrimary: "x", to: "a"},
skipUnlessSharded: true,
testcases: [
{
@@ -5558,8 +5780,7 @@ export const authCommandsLib = {
// Only enterprise knows of this command.
skipTest:
(conn) => {
- return !getBuildInfo().modules.includes("enterprise")
- || !TestData.setParameters.featureFlagOIDC;
+ return !getBuildInfo().modules.includes("enterprise");
},
testcases: [
{
@@ -5576,8 +5797,7 @@ export const authCommandsLib = {
// Only enterprise knows of this command.
skipTest:
(conn) => {
- return !getBuildInfo().modules.includes("enterprise")
- || !TestData.setParameters.featureFlagOIDC;
+ return !getBuildInfo().modules.includes("enterprise");
},
testcases: [
{
@@ -5958,9 +6178,9 @@ export const authCommandsLib = {
skipUnlessSharded: true,
testcases: [
{
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- expectFail: true,
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ expectFail: true,
privileges: [{resource: {cluster: true}, actions: ["transitionToDedicatedConfigServer"]}]
},
{runOnDb: firstDbName, roles: {}},
@@ -6422,12 +6642,12 @@ export const authCommandsLib = {
]
},
{
- // Test that only clusterManager has permission to run $telemetry
+ // Test that only clusterManager has permission to run $queryStats
testname: "testTelemetryReadPrivilege",
- command: {aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}},
+ command: {aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}},
skipSharded: false,
skipTest: (conn) => {
- return !TestData.setParameters.featureFlagTelemetry;
+ return !TestData.setParameters.featureFlagQueryStats;
},
testcases: [{runOnDb: adminDbName, roles: roles_clusterManager}]
},
diff --git a/jstests/auth/list_all_local_sessions.js b/jstests/auth/list_all_local_sessions.js
index 3b90d01b545e9..20fd0f7e021fa 100644
--- a/jstests/auth/list_all_local_sessions.js
+++ b/jstests/auth/list_all_local_sessions.js
@@ -1,5 +1,5 @@
// Auth tests for the $listLocalSessions {allUsers:true} aggregation stage.
-// @tags: [requires_fcv_70, requires_sharding]
+// @tags: [requires_sharding]
(function() {
'use strict';
diff --git a/jstests/auth/listcommands_preauth_mongod.js b/jstests/auth/listcommands_preauth_mongod.js
index f049e75de7abf..bf7ca437c6fee 100644
--- a/jstests/auth/listcommands_preauth_mongod.js
+++ b/jstests/auth/listcommands_preauth_mongod.js
@@ -9,4 +9,4 @@ load("jstests/auth/listcommands_preauth_base.js");
const mongod = MongoRunner.runMongod({auth: ""});
runTest(mongod);
MongoRunner.stopMongod(mongod);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/listcommands_preauth_sharded_cluster.js b/jstests/auth/listcommands_preauth_sharded_cluster.js
index 76115d26f5e1b..325f27cf47964 100644
--- a/jstests/auth/listcommands_preauth_sharded_cluster.js
+++ b/jstests/auth/listcommands_preauth_sharded_cluster.js
@@ -11,4 +11,4 @@ const st =
new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
runTest(st.s0);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/logout_reconnect.js b/jstests/auth/logout_reconnect.js
index fa5d8a8fcdb2c..614a89e7857a0 100644
--- a/jstests/auth/logout_reconnect.js
+++ b/jstests/auth/logout_reconnect.js
@@ -30,7 +30,7 @@ conn = MongoRunner.runMongod({restart: conn, noCleanData: true});
// expect to fail on first attempt since the socket is no longer valid
try {
- val = testDB.foo.findOne();
+ testDB.foo.findOne();
} catch (err) {
}
diff --git a/jstests/auth/mr_auth.js b/jstests/auth/mr_auth.js
index 9c398af1ba1b8..35c8ad1215f22 100644
--- a/jstests/auth/mr_auth.js
+++ b/jstests/auth/mr_auth.js
@@ -4,26 +4,26 @@
// This test requires users to persist across a restart.
// @tags: [requires_persistence]
-baseName = "jstests_mr_auth";
-dbName = "test";
-out = baseName + "_out";
+let baseName = "jstests_mr_auth";
+let dbName = "test";
+let out = baseName + "_out";
-map = function() {
+let map = function() {
emit(this.x, this.y);
};
-red = function(k, vs) {
+let red = function(k, vs) {
var s = 0;
for (var i = 0; i < vs.length; i++)
s += vs[i];
return s;
};
-red2 = function(k, vs) {
+let red2 = function(k, vs) {
return 42;
};
// make sure writing is allowed when started without --auth enabled
-dbms = MongoRunner.runMongod({bind_ip: "127.0.0.1"});
+let dbms = MongoRunner.runMongod({bind_ip: "127.0.0.1"});
var d = dbms.getDB(dbName);
var t = d[baseName];
diff --git a/jstests/auth/readIndex.js b/jstests/auth/readIndex.js
index 53c5d63ecba00..517415d865d77 100644
--- a/jstests/auth/readIndex.js
+++ b/jstests/auth/readIndex.js
@@ -19,4 +19,4 @@ var indexList = testDB.foo.getIndexes().filter(function(idx) {
});
assert.eq(1, indexList.length, tojson(indexList));
assert.docEq(indexList[0].key, {a: 1}, tojson(indexList));
-MongoRunner.stopMongod(conn, null, {user: 'root', pwd: 'password'});
\ No newline at end of file
+MongoRunner.stopMongod(conn, null, {user: 'root', pwd: 'password'});
diff --git a/jstests/auth/rename_encrypted_collection.js b/jstests/auth/rename_encrypted_collection.js
index b9240bf650c95..311ee357eae35 100644
--- a/jstests/auth/rename_encrypted_collection.js
+++ b/jstests/auth/rename_encrypted_collection.js
@@ -6,11 +6,6 @@
* requires_fcv_61,
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
-
function runTestWithAuth(conn, allowsRename, verifyFunction) {
const db = conn.getDB("test");
const srcDbName = 'rename_encrypted_collection_src_db';
@@ -125,5 +120,4 @@ jsTestLog("Sharding: Testing fle2 collection rename");
runTest(st.s);
st.stop();
-}
-}());
+}
\ No newline at end of file
diff --git a/jstests/auth/rename_system_buckets_collection.js b/jstests/auth/rename_system_buckets_collection.js
new file mode 100644
index 0000000000000..2a6fa9cfe3e72
--- /dev/null
+++ b/jstests/auth/rename_system_buckets_collection.js
@@ -0,0 +1,128 @@
+// Tests renaming the system.buckets collection.
+(function() {
+"use strict";
+
+// Set up the test database.
+const dbName = "test";
+const collName = "mongosync.tmp.UUID123";
+const bucketsCollName = `system.buckets.${collName}`;
+const targetBucketsCollName = "system.buckets.manual";
+
+function renameBucketsCollection(adminDB, username, shouldSucceed) {
+ // Create the collection as the admin user.
+ assert.eq(1, adminDB.auth("admin", "admin"));
+
+ const testDB = adminDB.getSiblingDB(dbName);
+
+ testDB[bucketsCollName].drop();
+ testDB[targetBucketsCollName].drop();
+
+ assert.commandWorked(
+ testDB.createCollection(bucketsCollName, {timeseries: {timeField: "time"}}));
+ adminDB.logout();
+
+ // Try rename with test users
+ jsTestLog("Testing system.buckets renaming with username: " + username);
+ assert(adminDB.auth(username, 'password'));
+
+ // No privilege grants the ability to rename a system.buckets collection to a non-bucket
+ // namespace.
+ assert.commandFailed(testDB.adminCommand({
+ renameCollection: `${testDB}.${bucketsCollName}`,
+ to: `${testDB}.${collName}`,
+ dropTarget: false
+ }));
+
+ const res = testDB.adminCommand({
+ renameCollection: `${testDB}.${bucketsCollName}`,
+ to: `${testDB}.${targetBucketsCollName}`,
+ dropTarget: true
+ });
+
+ assert.eq((shouldSucceed) ? 1 : 0,
+ res.ok,
+ "Rename collection failed or succeeded unexpectedly:" + tojson(res));
+
+ adminDB.logout();
+}
+
+function runTest(conn) {
+ const adminDB = conn.getDB("admin");
+
+ // Create the admin user.
+ adminDB.createUser({user: 'admin', pwd: 'admin', roles: ['root']});
+ assert.eq(1, adminDB.auth("admin", "admin"));
+
+ // Create roles with ability to rename system.buckets collections.
+ adminDB.createRole({
+ role: "renameBucketsOnly",
+ privileges: [{
+ resource: {db: '', system_buckets: ''},
+ actions: [
+ "createIndex",
+ "dropCollection",
+ "find",
+ "insert",
+ ]
+ }],
+ roles: []
+ });
+
+ // Create test users.
+ adminDB.createUser(
+ {user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase', 'renameBucketsOnly']});
+
+ // Create read and write users.
+ adminDB.createUser({
+ user: 'readWriteAdmin',
+ pwd: 'password',
+ roles: ['readWriteAnyDatabase', 'renameBucketsOnly']
+ });
+
+ // Create strong users.
+ adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore', 'renameBucketsOnly']});
+ adminDB.createUser({user: 'root', pwd: 'password', roles: ['root', 'renameBucketsOnly']});
+ adminDB.createUser(
+ {user: 'rootier', pwd: 'password', roles: ['__system', 'renameBucketsOnly']});
+ adminDB.createUser(
+ {user: 'reader', pwd: 'password', roles: ['readAnyDatabase', 'renameBucketsOnly']});
+
+ adminDB.logout();
+
+ // Expect renaming system.buckets collection to succeed.
+ renameBucketsCollection(adminDB, 'restore', true);
+ renameBucketsCollection(adminDB, 'root', true);
+ renameBucketsCollection(adminDB, 'rootier', true);
+
+ // Expect renaming to fail for users with inadequate roles.
+ renameBucketsCollection(adminDB, 'reader', false);
+ renameBucketsCollection(adminDB, 'readWriteAdmin', false);
+ renameBucketsCollection(adminDB, 'userAdmin', false);
+}
+
+jsTestLog("ReplicaSet: Testing rename timeseries collection");
+{
+ const rst = new ReplSetTest({nodes: 1, auth: "", keyFile: 'jstests/libs/key1'});
+ rst.startSet();
+
+ rst.initiate();
+ rst.awaitReplication();
+ runTest(rst.getPrimary());
+ rst.stopSet();
+}
+
+jsTestLog("Sharding: Testing rename timeseries collection");
+{
+ const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: {auth: ""}}
+ });
+
+ runTest(st.s);
+
+ st.stop();
+}
+})();
diff --git a/jstests/auth/repl.js b/jstests/auth/repl.js
index 6f5b7ed0dcb8f..b4c3ae7aa6f81 100644
--- a/jstests/auth/repl.js
+++ b/jstests/auth/repl.js
@@ -172,7 +172,7 @@ var AuthReplTest = function(spec) {
};
jsTest.log("1 test replica sets");
-var rs = new ReplSetTest({name: rsName, nodes: 2});
+const rs = new ReplSetTest({name: rsName, nodes: 2});
var nodes = rs.startSet(mongoOptions);
rs.initiate();
authutil.asCluster(nodes, "jstests/libs/key1", function() {
diff --git a/jstests/auth/repl_require_keyfile.js b/jstests/auth/repl_require_keyfile.js
index fc5977a2d1da3..6753c8282d34b 100644
--- a/jstests/auth/repl_require_keyfile.js
+++ b/jstests/auth/repl_require_keyfile.js
@@ -14,4 +14,4 @@ const mongoOutput = rawMongoProgramOutput();
assert(mongoOutput.indexOf(
"security.keyFile is required when authorization is enabled with replica sets") >= 0,
"Expected error message about missing keyFile on startup");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/restore_role_create_collection_via_apply_ops.js b/jstests/auth/restore_role_create_collection_via_apply_ops.js
index be30be7db4767..bcfc57b03b56a 100644
--- a/jstests/auth/restore_role_create_collection_via_apply_ops.js
+++ b/jstests/auth/restore_role_create_collection_via_apply_ops.js
@@ -58,4 +58,4 @@ function runTest(conn) {
const standalone = MongoRunner.runMongod({auth: ''});
runTest(standalone);
MongoRunner.stopMongod(standalone);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/sbe_plan_cache_user_roles.js b/jstests/auth/sbe_plan_cache_user_roles.js
index 8c4313a4ee57a..20e24cf220836 100644
--- a/jstests/auth/sbe_plan_cache_user_roles.js
+++ b/jstests/auth/sbe_plan_cache_user_roles.js
@@ -2,14 +2,11 @@
* Test $$USER_ROLES works correctly with the SBE plan cache. The same query should return the
* updated user role info when a different user logs in.
* @tags: [
- * featureFlagUserRoles,
* # Multiple servers can mess up the plan cache list.
* assumes_standalone_mongod,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const mongod = MongoRunner.runMongod();
const dbName = "test";
@@ -54,5 +51,4 @@ assert.eq(results[0].roles, [{_id: "test.readWrite", role: "readWrite", db: "tes
verifyPlanCache("test.readWrite");
db.logout();
-MongoRunner.stopMongod(mongod);
-})();
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/auth/secondary_invalidation.js b/jstests/auth/secondary_invalidation.js
index 16f2938ebbf78..d44dec85ca103 100644
--- a/jstests/auth/secondary_invalidation.js
+++ b/jstests/auth/secondary_invalidation.js
@@ -20,7 +20,7 @@ admin.auth('admin', 'password');
primary.getDB('foo').createUser({user: 'foo', pwd: 'foopwd', roles: []}, {w: NUM_NODES});
-secondaryFoo = secondary.getDB('foo');
+let secondaryFoo = secondary.getDB('foo');
secondaryFoo.auth('foo', 'foopwd');
assert.throws(function() {
secondaryFoo.col.findOne();
diff --git a/jstests/auth/speculative-auth-replset.js b/jstests/auth/speculative-auth-replset.js
index 9f36444a0204d..3bd960780d6e0 100644
--- a/jstests/auth/speculative-auth-replset.js
+++ b/jstests/auth/speculative-auth-replset.js
@@ -27,8 +27,9 @@ function countAuthInLog(conn) {
}
} else if (entry.id === kAuthenticationFailedLogId) {
// Authentication can fail legitimately because the secondary abandons the connection
- // during shutdown.
- assert.eq(entry.attr.error.code, ErrorCodes.AuthenticationAbandoned);
+ // during shutdown - if we do encounter an authentication failure in the log, make sure
+ // that it is only of this type, fail anything else
+ assert.eq(entry.attr.result, ErrorCodes.AuthenticationAbandoned);
} else {
// Irrelevant.
return;
diff --git a/jstests/auth/system_buckets_invalid_nss.js b/jstests/auth/system_buckets_invalid_nss.js
new file mode 100644
index 0000000000000..dd182c4858462
--- /dev/null
+++ b/jstests/auth/system_buckets_invalid_nss.js
@@ -0,0 +1,21 @@
+// Validate that *.system.buckets.system.buckets.* is an invalid namespace
+
+(function() {
+"use strict";
+
+function runTest(conn) {
+ const admin = conn.getDB('admin');
+ assert.commandWorked(admin.runCommand({createUser: 'admin', pwd: 'admin', roles: ['root']}));
+
+ assert.commandFailedWithCode(admin.system.buckets.system.buckets.foo.insert({x: 1}),
+ [ErrorCodes.Unauthorized]);
+
+ assert(admin.auth('admin', 'admin'));
+ assert.commandFailedWithCode(admin.system.buckets.system.buckets.foo.insert({x: 1}),
+ [ErrorCodes.InvalidNamespace]);
+}
+
+const mongod = MongoRunner.runMongod({auth: ''});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
+}());
diff --git a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
index 4c0ff0c8518be..fe00d5fbc219e 100644
--- a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
+++ b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
@@ -11,6 +11,7 @@ load('jstests/ssl/libs/ssl_helpers.js');
TestData.skipCheckingIndexesConsistentAcrossCluster = true;
TestData.skipCheckOrphans = true;
TestData.skipCheckShardFilteringMetadata = true;
+TestData.skipCheckRoutingTableConsistency = true;
// Disable auth explicitly
var noAuthOptions = {noauth: ''};
diff --git a/jstests/auth/user_cache_acquisition_stats.js b/jstests/auth/user_cache_acquisition_stats.js
index a2f9996d127b0..2a2e36d73b7ce 100644
--- a/jstests/auth/user_cache_acquisition_stats.js
+++ b/jstests/auth/user_cache_acquisition_stats.js
@@ -150,4 +150,4 @@ function runTest(conn, mode) {
jsTest.log('SUCCESS user_cache_acquisition_stats.js Sharding');
st.stop();
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js
index d27fbb7d522a4..5e4733e9d6736 100644
--- a/jstests/auth/user_defined_roles_on_secondaries.js
+++ b/jstests/auth/user_defined_roles_on_secondaries.js
@@ -48,7 +48,7 @@ function assertListContainsRole(list, role, msg) {
if (list[i].role == role.role && list[i].db == role.db)
return;
}
- doassert("Could not find value " + tojson(val) + " in " + tojson(list) +
+ doassert("Could not find value " + tojson(role) + " in " + tojson(list) +
(msg ? ": " + msg : ""));
}
diff --git a/jstests/auth/user_roles_disable_parameter.js b/jstests/auth/user_roles_disable_parameter.js
index 1f93968832eeb..c5fe17a304b10 100644
--- a/jstests/auth/user_roles_disable_parameter.js
+++ b/jstests/auth/user_roles_disable_parameter.js
@@ -1,5 +1,5 @@
// Tests that $$USER_ROLES is not available when the server parameter is set to false.
-// @tags: [featureFlagUserRoles, requires_fcv_70]
+// @tags: [requires_fcv_70]
(function() {
"use strict";
diff --git a/jstests/auth/user_roles_empty.js b/jstests/auth/user_roles_empty.js
index f450ad42d1b8d..938e22404d75a 100644
--- a/jstests/auth/user_roles_empty.js
+++ b/jstests/auth/user_roles_empty.js
@@ -1,6 +1,6 @@
// Tests that $$USER_ROLES works as expected in a find command when the array returned by
// $$USER_ROLES is empty and when mongod was started with auth disabled.
-// @tags: [featureFlagUserRoles, requires_fcv_70]
+// @tags: [requires_fcv_70]
(function() {
"use strict";
diff --git a/jstests/auth/user_roles_find_agg.js b/jstests/auth/user_roles_find_agg.js
index 0e49f67134db6..860ac3f8cc50c 100644
--- a/jstests/auth/user_roles_find_agg.js
+++ b/jstests/auth/user_roles_find_agg.js
@@ -1,6 +1,6 @@
// Tests that $$USER_ROLES works as expected in a find command and an aggregate command (on both a
// standalone mongod and a sharded cluster).
-// @tags: [featureFlagUserRoles, requires_fcv_70]
+// @tags: [requires_fcv_70]
(function() {
"use strict";
diff --git a/jstests/auth/user_roles_update.js b/jstests/auth/user_roles_update.js
deleted file mode 100644
index b01a3a60d5542..0000000000000
--- a/jstests/auth/user_roles_update.js
+++ /dev/null
@@ -1,28 +0,0 @@
-// Tests that $$USER_ROLES is not able to be accessed within an update command.
-// @tags: [featureFlagUserRoles, requires_fcv_70]
-
-(function() {
-"use strict";
-
-const dbName = "test";
-const collName = "coll";
-const varNotAvailableErr = 51144;
-
-const mongod = MongoRunner.runMongod({auth: ""});
-
-// Create a user on the admin database.
-let admin = mongod.getDB("admin");
-assert.commandWorked(admin.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
-admin.auth("admin", "admin");
-
-const db = mongod.getDB(dbName);
-let coll = db.getCollection(collName);
-assert.commandWorked(coll.insert({a: 1}));
-
-// Check that $$USER_ROLES is not available within an update command.
-assert.commandFailedWithCode(coll.update({$expr: {$in: ["root", '$$USER_ROLES.role']}}, {a: 2}),
- varNotAvailableErr);
-
-db.logout();
-MongoRunner.stopMongod(mongod);
-}());
diff --git a/jstests/auth/user_roles_update_findAndModify.js b/jstests/auth/user_roles_update_findAndModify.js
new file mode 100644
index 0000000000000..3faf74a08a1a8
--- /dev/null
+++ b/jstests/auth/user_roles_update_findAndModify.js
@@ -0,0 +1,174 @@
+// Tests that $$USER_ROLES can be used in the "update" and "findAndModify" commands.
+// @tags: [requires_fcv_70]
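+//
+// For context: with the user created below (readWriteAnyDatabase on "admin" and read on
+// "test"), $$USER_ROLES resolves to an array of documents roughly of the form
+//   [{_id: "admin.readWriteAnyDatabase", role: "readWriteAnyDatabase", db: "admin"},
+//    {_id: "test.read", role: "read", db: "test"}],
+// so "$$USER_ROLES.role" evaluates to ["readWriteAnyDatabase", "read"], which is what the
+// assertions below compare against.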
+
+(function() {
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+function initialize(db) {
+ let engDoc = {
+ _id: 0,
+ allowedRoles: ["eng-app-prod", "eng-app-stg", "read"],
+ allowedRole: "read",
+ comment: "only for engineering team",
+ teamMembers: ["John", "Ashley", "Gina"],
+ yearlyEduBudget: 15000,
+ yearlyTnEBudget: 2000,
+ salesWins: 1000
+ };
+
+ let salesDoc = {
+ _id: 1,
+ allowedRoles: ["sales-person"],
+ allowedRole: "observe",
+ comment: "only for sales team",
+ salesWins: 1000
+ };
+
+ let testUpdate = {_id: 2, allowedRole: "test", teamMembersRights: ["testUpdate"]};
+
+ let testFindAndModify = {
+ _id: 3,
+ allowedRole: "write",
+ teamMembersRights: ["testFindAndModify"]
+ };
+
+ let coll = db.getCollection(collName);
+ assert.commandWorked(coll.insertMany([engDoc, salesDoc, testUpdate, testFindAndModify]));
+}
+
+// Test accessing $$USER_ROLES in the query portion of "update" command.
+function runUpdateQuery(db) {
+ let coll = db.getCollection(collName);
+
+ let pre = coll.findOne(
+ {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}});
+ var preSalesWins = pre.salesWins;
+
+ assert.commandWorked(coll.update(
+ {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}},
+ {$inc: {salesWins: 1000}},
+ {multi: true}));
+
+ let post = coll.findOne(
+ {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}});
+ var postSalesWins = post.salesWins;
+
+ assert.eq(postSalesWins, preSalesWins + 1000);
+}
+
+// Test accessing $$USER_ROLES in the update portion of "update" command.
+function runUpdateUpdate(db) {
+ let coll = db.getCollection(collName);
+
+ assert.commandWorked(
+ coll.update({_id: 2}, [{$set: {"teamMembersRights": "$$USER_ROLES.role"}}]));
+
+ let post = coll.findOne({_id: 2});
+
+ let expectedResult = {
+ _id: 2,
+ allowedRole: "test",
+ teamMembersRights: ["readWriteAnyDatabase", "read"]
+ };
+
+ assert.eq(post, expectedResult);
+}
+
+// Test accessing $$USER_ROLES in the query portion of "findAndModify" command.
+function runFindAndModifyQuery(db) {
+ let coll = db.getCollection(collName);
+
+ let pre = coll.findOne({$expr: {allowedRole: "$$USER_ROLES.role"}});
+ var preSalesWins = pre.salesWins;
+
+ db.coll.findAndModify({
+ query: {allowedRole: "read", $expr: {allowedRole: "$$USER_ROLES.role"}},
+ update: {$inc: {salesWins: 1000}}
+ });
+
+ let post = coll.findOne({$expr: {allowedRole: "$$USER_ROLES.role"}});
+ var postSalesWins = post.salesWins;
+
+ assert.eq(postSalesWins, preSalesWins + 1000);
+}
+
+// Test accessing $$USER_ROLES in the update portion of "findAndModify" command.
+function runFindAndModifyUpdate(db) {
+ let coll = db.getCollection(collName);
+
+ coll.findAndModify({
+ query: {allowedRole: "write"},
+ update: [{$set: {"teamMembersRights": "$$USER_ROLES.role"}}]
+ });
+
+ let post = coll.findOne({_id: 3});
+
+ let expectedResult = {
+ _id: 3,
+ allowedRole: "write",
+ teamMembersRights: ["readWriteAnyDatabase", "read"]
+ };
+
+ assert.eq(post, expectedResult);
+}
+
+function runTest(conn, st = null) {
+ // Create a user on the admin database.
+ let admin = conn.getDB("admin");
+ assert.commandWorked(admin.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
+ admin.auth("admin", "admin");
+
+ if (st) {
+ // Shard the collection that will be used in the update and findAndModify commands.
+ assert.commandWorked(conn.getDB("admin").runCommand({enableSharding: dbName}));
+ st.shardColl(conn.getDB(dbName).getCollection(collName), {allowedRole: 1});
+ }
+
+ const db = conn.getDB(dbName);
+ let coll = db.getCollection(collName);
+
+ // Create a user that has roles on more than one database. The readWriteAnyDatabase role
+ // is necessary for the inserts that follow to work.
+ assert.commandWorked(db.runCommand({
+ createUser: "user",
+ pwd: "pwd",
+ roles: [{role: "readWriteAnyDatabase", db: "admin"}, {role: "read", db: dbName}]
+ }));
+
+ // Log out of the admin user so that we can log in as the test user and access that
+ // user's roles via $$USER_ROLES below.
+ admin.logout();
+ db.auth("user", "pwd");
+
+ initialize(db);
+
+ runUpdateQuery(db);
+
+ runUpdateUpdate(db);
+
+ runFindAndModifyQuery(db);
+
+ runFindAndModifyUpdate(db);
+
+ db.logout();
+}
+
+jsTest.log("Test standalone");
+const mongod = MongoRunner.runMongod({auth: ""});
+runTest(mongod);
+MongoRunner.stopMongod(mongod);
+
+jsTest.log("Test sharded cluster");
+const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ shards: 2,
+ keyFile: 'jstests/libs/key1',
+});
+
+runTest(st.s, st);
+st.stop();
+}());
diff --git a/jstests/auth/user_roles_view.js b/jstests/auth/user_roles_view.js
index a21d3dae8720c..87dc0650fe3f2 100644
--- a/jstests/auth/user_roles_view.js
+++ b/jstests/auth/user_roles_view.js
@@ -1,6 +1,6 @@
// Tests that $$USER_ROLES works as expected in view creation and queries on the view (on both a
// standalone mongod and a sharded cluster).
-// @tags: [featureFlagUserRoles, requires_fcv_70]
+// @tags: [requires_fcv_70]
(function() {
"use strict";
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index a7a100ea12430..38b39bb5529b9 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -10,8 +10,6 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
// assert[Valid|Invalid]ChangeStreamNss.
-const isMongos = FixtureHelpers.isMongos(db);
-
// Drop and recreate the collections to be used in this set of tests.
assertDropAndRecreateCollection(db, "t1");
assertDropAndRecreateCollection(db, "t2");
@@ -33,7 +31,7 @@ checkArgFails([1, 2, "invalid", {x: 1}]);
assertInvalidChangeStreamNss("admin", "testColl");
assertInvalidChangeStreamNss("config", "testColl");
// Not allowed to access 'local' database through mongos.
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
assertInvalidChangeStreamNss("local", "testColl");
}
diff --git a/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js b/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js
index f2ca07b1c54bc..fc96b63e5c5f8 100644
--- a/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js
+++ b/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js
@@ -13,7 +13,7 @@
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-const testDB = db.getSiblingDB(jsTestName());
+const testDB = db.getSiblingDB("preImage_lookup_whole_db_whole_cluster");
const adminDB = db.getSiblingDB("admin");
const collWithPreImageName = "coll_with_pre_images";
const collWithNoPreImageName = "coll_with_no_pre_images";
diff --git a/jstests/change_streams/ddl_create_drop_index_events.js b/jstests/change_streams/ddl_create_drop_index_events.js
index 7119dbc140d7b..de17ea51409d1 100644
--- a/jstests/change_streams/ddl_create_drop_index_events.js
+++ b/jstests/change_streams/ddl_create_drop_index_events.js
@@ -7,14 +7,11 @@
* assumes_against_mongod_not_mongos,
* ]
*/
-(function() {
-"use strict";
-
load('jstests/libs/collection_drop_recreate.js'); // For 'assertDropAndRecreateCollection' and
// 'assertDropCollection'.
load('jstests/libs/change_stream_util.js'); // For 'ChangeStreamTest' and
// 'assertChangeStreamEventEq'.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {safeToCreateColumnStoreIndex} from "jstests/libs/columnstore_util.js";
const testDB = db.getSiblingDB(jsTestName());
@@ -180,4 +177,3 @@ runTest((() => cst.startWatchingChanges({pipeline, collection: 1})), true);
// Run the test using a single collection change stream on a non-empty collection.
runTest((() => cst.startWatchingChanges({pipeline, collection: collName})), true);
-}());
diff --git a/jstests/change_streams/ddl_create_event.js b/jstests/change_streams/ddl_create_event.js
index 72276e2ce4c62..de1c3ca24ee0f 100644
--- a/jstests/change_streams/ddl_create_event.js
+++ b/jstests/change_streams/ddl_create_event.js
@@ -4,14 +4,10 @@
*
* @tags: [ requires_fcv_60, ]
*/
-(function() {
-"use strict";
-
load('jstests/libs/collection_drop_recreate.js'); // For 'assertDropAndRecreateCollection' and
// 'assertDropCollection'.
load('jstests/libs/change_stream_util.js'); // For 'ChangeStreamTest' and
// 'assertChangeStreamEventEq'.
-load("jstests/libs/feature_flag_util.js");
const testDB = db.getSiblingDB(jsTestName());
@@ -69,14 +65,7 @@ function runTest(startChangeStream) {
assertDropCollection(testDB, collName);
// With capped collection parameters.
- let expectedSize;
-
- // TODO SERVER-74653: Remove feature flag check.
- if (FeatureFlagUtil.isPresentAndEnabled(testDB, "CappedCollectionsRelaxedSize")) {
- expectedSize = 1000;
- } else {
- expectedSize = 1024;
- }
+ let expectedSize = 1000;
validateExpectedEventAndDropCollection({create: collName, capped: true, size: 1000, max: 1000},
{
operationType: "create",
@@ -198,4 +187,3 @@ function runTest(startChangeStream) {
const pipeline = [{$changeStream: {showExpandedEvents: true}}];
runTest(() => test.startWatchingChanges({pipeline, collection: 1}));
runTest(() => test.startWatchingChanges({pipeline, collection: collName}));
-}());
diff --git a/jstests/change_streams/error_label.js b/jstests/change_streams/error_label.js
index 31b38f2f7cbd9..f117e9d2184a8 100644
--- a/jstests/change_streams/error_label.js
+++ b/jstests/change_streams/error_label.js
@@ -30,4 +30,4 @@ const err = assert.throws(function() {
assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError);
assert("errorLabels" in err, err);
assert.contains("NonResumableChangeStreamError", err.errorLabels, err);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/change_streams/expanded_update_description.js b/jstests/change_streams/expanded_update_description.js
index 51ee93381b92d..9cd45eb2e45c1 100644
--- a/jstests/change_streams/expanded_update_description.js
+++ b/jstests/change_streams/expanded_update_description.js
@@ -1,10 +1,5 @@
/**
* Test change stream 'updateDescription' with 'showExpandedEvents'.
- *
- * @tags: [
- * requires_fcv_61,
- * featureFlagChangeStreamsFurtherEnrichedEvents,
- * ]
*/
(function() {
diff --git a/jstests/change_streams/generate_v1_resume_token.js b/jstests/change_streams/generate_v1_resume_token.js
deleted file mode 100644
index 05ebc2f6d9295..0000000000000
--- a/jstests/change_streams/generate_v1_resume_token.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Test that the $_generateV2ResumeTokens parameter can be used to force change streams to return v1
- * tokens.
- * @tags: [
- * requires_fcv_61
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
-
-const coll = assertDropAndRecreateCollection(db, jsTestName());
-
-// Create one stream that returns v2 tokens, the default.
-const v2Stream = coll.watch([]);
-
-// Create a second stream that explicitly requests v1 tokens.
-const v1Stream = coll.watch([], {$_generateV2ResumeTokens: false});
-
-// Insert a test document into the collection.
-assert.commandWorked(coll.insert({_id: 1}));
-
-// Wait until both streams have encountered the insert operation.
-assert.soon(() => v1Stream.hasNext() && v2Stream.hasNext());
-const v1Event = v1Stream.next();
-const v2Event = v2Stream.next();
-
-// Confirm that the streams see the same event, but the resume tokens differ.
-const v1ResumeToken = v1Event._id;
-const v2ResumeToken = v2Event._id;
-
-delete v1Event._id;
-delete v2Event._id;
-
-assert.docEq(v1Event, v2Event);
-assert.neq(v1ResumeToken, v2ResumeToken, {v1ResumeToken, v2ResumeToken});
-})();
\ No newline at end of file
diff --git a/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js b/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js
index 7c5b274cd4135..0f3be92b5c873 100644
--- a/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js
+++ b/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js
@@ -145,4 +145,4 @@ if (!FixtureHelpers.isMongos(testDB)) {
{_id: 6, operationType: "delete", preImage: {_id: 6, a: 1}},
]);
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js
index ec45e81871ac0..9e6bd6285f762 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js
@@ -10,10 +10,11 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ assertNumChangeStreamDocsReturnedFromShard,
+ assertNumMatchingOplogEventsForShard,
+ createShardedCollection,
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = "change_stream_match_pushdown_and_rewrite";
const collName = "coll1";
@@ -74,15 +75,15 @@ const stats = coll.explain("executionStats").aggregate([
// Verify the number of documents seen from each shard by the mongoS pipeline. Because we expect
// the $match to be pushed down to the shards, we expect to only see the 1 "insert" operation on
// each shard. All other operations should be filtered out on the shards.
-assertNumChangeStreamDocsReturnedFromShard(stats, st.rs0.name, 1);
-assertNumChangeStreamDocsReturnedFromShard(stats, st.rs1.name, 1);
+assertNumChangeStreamDocsReturnedFromShard(stats, st.shard0.shardName, 1);
+assertNumChangeStreamDocsReturnedFromShard(stats, st.shard1.shardName, 1);
// Because it is possible to rewrite the {operationType: "insert"} predicate so that it applies
// to the oplog entry, we expect the $match to get pushed all the way to the initial oplog
// query. This query executes in an internal "$cursor" stage, and we expect to see exactly 1
// document from this stage on each shard.
-assertNumMatchingOplogEventsForShard(stats, st.rs0.name, 1);
-assertNumMatchingOplogEventsForShard(stats, st.rs1.name, 1);
+assertNumMatchingOplogEventsForShard(stats, st.shard0.shardName, 1);
+assertNumMatchingOplogEventsForShard(stats, st.shard1.shardName, 1);
// Generate another 7 oplog events, this time within a transaction. One of the events is in a
// different collection, to validate that events from outside the watched namespace get filtered
@@ -134,11 +135,11 @@ const txnStatsAfterEvent2 = coll.explain("executionStats").aggregate([
// Verify the number of documents seen from each shard by the mongoS pipeline. As before, we expect
// that everything except the inserts will be filtered on the shard, limiting the number of events
// the mongoS needs to retrieve.
-assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.rs0.name, 1);
+assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.shard0.shardName, 1);
// Note that the event we are resuming from is sent to the mongoS from shard 2, even though it gets
// filtered out, which is why we see 2 events here.
-assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.rs1.name, 2);
+assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.shard1.shardName, 2);
// Generate a second transaction.
session.startTransaction({readConcern: {level: "majority"}});
@@ -161,8 +162,8 @@ const txnStatsAfterEvent1 = coll.explain("executionStats").aggregate([
// The "lsid" and "txnNumber" filters should get pushed all the way to the initial oplog query
// in the $cursor stage, meaning that every oplog entry gets filtered out except the
// 'commitTransaction' on each shard for the one transaction we select with our filter.
-assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.rs0.name, 1);
-assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.rs1.name, 1);
+assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.shard0.shardName, 1);
+assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.shard1.shardName, 1);
// Ensure that optimization does not attempt to create a filter that disregards the collation.
const collationChangeStream = coll.aggregate(
@@ -183,5 +184,4 @@ assert.eq(stringValues.slice(0, 2), ["Value", "vAlue"]);
// transaction, they effectively occur at exactly the same time.
assert.sameMembers(stringValues.slice(2, 4), ["vaLue", "valUe"]);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js
index 646a6c581325c..3058b9e65018f 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js
@@ -11,11 +11,11 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
const dbName = "change_stream_match_pushdown_documentKey_rewrite";
const collName = "change_stream_match_pushdown_documentKey_rewrite";
@@ -201,4 +201,3 @@ verifyOnWholeCluster({$match: {operationType: "drop"}},
[1, 0] /* expectedChangeStreamDocsForEachShard */);
st.stop();
-})();
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js
index 7b1884a11f08a..9272d9a61f5f0 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js
@@ -12,10 +12,10 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = "change_stream_match_pushdown_fullDocumentBeforeChange_rewrite";
const collName = "change_stream_match_pushdown_fullDocumentBeforeChange_rewrite";
@@ -289,5 +289,4 @@ verifyOnWholeCluster({$match: {operationType: "drop"}},
[1, 0] /* expectedOplogRetDocsForEachShard */,
[1, 0] /* expectedChangeStreamDocsForEachShard */);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
index b149335a06956..e26903f68f250 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
@@ -11,10 +11,10 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = "change_stream_match_pushdown_fullDocument_rewrite";
const collName = "change_stream_match_pushdown_fullDocument_rewrite";
@@ -279,5 +279,4 @@ runVerifyOpsTestcases("delete");
assert(coll.drop());
runVerifyOpsTestcases("drop");
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
index af9b0b9c3b6a3..1294c30d5dfff 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
@@ -10,11 +10,11 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
const dbName = "change_stream_match_pushdown_and_rewrite";
const otherDbName = "other_db";
@@ -1094,4 +1094,3 @@ verifyOnWholeCluster(thirdResumeAfterToken,
1 /* expectedOplogRetDocsForEachShard */);
st.stop();
-})();
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js
index 5287c5c033628..a117949ffefb4 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js
@@ -11,10 +11,10 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = "change_stream_match_pushdown_and_rewrite";
const collName = "coll1";
@@ -305,5 +305,4 @@ verifyOnWholeCluster({
},
[6, 5] /* expectedOplogRetDocsForEachShard */);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js
index a421a4375dc9f..df4f20117a05a 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js
@@ -10,11 +10,11 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
const dbName = "change_stream_match_pushdown_and_rewrite";
@@ -672,4 +672,3 @@ verifyOnWholeCluster({
4 /* expectedOplogRetDocsForEachShard*/);
st.stop();
-})();
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js
index e1affb18167ca..f5bd897cba333 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js
@@ -11,10 +11,10 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = "change_stream_match_pushdown_updateDescription_rewrite";
const collName = "change_stream_match_pushdown_updateDescription_rewrite";
@@ -266,5 +266,4 @@ verifyOnWholeCluster(
[1, 0] /* expectedOplogRetDocsForEachShard*/,
[1, 0] /*expectedChangeStreamDocsForEachShard*/);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js b/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js
index 8e0869ad243df..7e9fc9b604358 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js
@@ -8,12 +8,13 @@
* uses_change_streams
* ]
*/
-(function() {
-"use strict";
+import {
+ generateChangeStreamWriteWorkload,
+ getAllChangeStreamEvents,
+ isPlainObject
+} from "jstests/libs/change_stream_rewrite_util.js";
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
-
-const dbName = "change_stream_rewrite_null_existence_test";
+const dbName = "rewrite_null_existence_test";
const collName = "coll1";
const testDB = db.getSiblingDB(dbName);
@@ -210,5 +211,4 @@ for (let csConfig of [{fullDocument: "updateLookup", showExpandedEvents: true}])
}
// Assert that there were no failed test cases.
-assert(failedTestCases.length == 0, failedTestCases);
-})();
+assert(failedTestCases.length == 0, failedTestCases);
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js b/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
index b49ce4d721d6e..c74b8cf599d37 100644
--- a/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
+++ b/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
@@ -11,11 +11,11 @@
// assumes_unsharded_collection,
// assumes_read_preference_unchanged
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+import {
+ createShardedCollection,
+ verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
const dbName = "change_stream_match_pushdown_and_rewrite";
const shard0Only = "shard0Only";
@@ -860,4 +860,3 @@ verifyOnWholeCluster(thirdResumeAfterToken,
[9, 4] /* expectedOplogRetDocsForEachShard */);
st.stop();
-})();
diff --git a/jstests/change_streams/oplog_rewrite/projection_changes_type.js b/jstests/change_streams/oplog_rewrite/projection_changes_type.js
index b463444cea952..cb9b8cae08157 100644
--- a/jstests/change_streams/oplog_rewrite/projection_changes_type.js
+++ b/jstests/change_streams/oplog_rewrite/projection_changes_type.js
@@ -3,10 +3,11 @@
* change stream framework to throw exceptions. Exercises the fix for SERVER-65497.
* @tags: [ requires_fcv_60 ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers.
+import {
+ generateChangeStreamWriteWorkload,
+ getAllChangeStreamEvents,
+ isPlainObject
+} from "jstests/libs/change_stream_rewrite_util.js";
const dbName = jsTestName();
const collName = "coll1";
@@ -83,5 +84,4 @@ for (let fieldName of fieldsToInclude) {
// Test projection of all accumulated fields.
assertProjection(Object.assign(accumulatedProjection, currentFieldProjection));
-}
-})();
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/jstests/change_streams/pipeline_style_updates.js b/jstests/change_streams/pipeline_style_updates.js
index 4b9f56842dfcb..75c0e5fb1cf0c 100644
--- a/jstests/change_streams/pipeline_style_updates.js
+++ b/jstests/change_streams/pipeline_style_updates.js
@@ -72,4 +72,4 @@ expected = {
testPipelineStyleUpdate(updatePipeline, expected, "update");
cst.cleanUp();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/change_streams/projection_fakes_internal_event.js b/jstests/change_streams/projection_fakes_internal_event.js
index 4c95ac78fe575..cbc8bbdb40e55 100644
--- a/jstests/change_streams/projection_fakes_internal_event.js
+++ b/jstests/change_streams/projection_fakes_internal_event.js
@@ -2,7 +2,10 @@
* Tests that a user projection which fakes an internal topology-change event is handled gracefully
* in a sharded cluster.
* TODO SERVER-65778: rework this test when we can handle faked internal events more robustly.
- * @tags: [assumes_read_preference_unchanged]
+ *
+ * Since this test only verifies that a user projection faking an internal event does not crash
+ * anything, it is not valuable to run against a config shard.
+ * @tags: [assumes_read_preference_unchanged, config_shard_incompatible]
*/
(function() {
"use strict";
@@ -212,4 +215,4 @@ testProjection = {
assertChangeStreamBehaviour(testProjection, null, ErrorCodes.TypeMismatch);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/change_streams/queryable_encryption_change_stream.js b/jstests/change_streams/queryable_encryption_change_stream.js
new file mode 100644
index 0000000000000..d4f6b7b6234af
--- /dev/null
+++ b/jstests/change_streams/queryable_encryption_change_stream.js
@@ -0,0 +1,405 @@
+//
+// Basic $changeStream tests for operations that perform queryable encryption.
+//
+// @tags: [
+// change_stream_does_not_expect_txns,
+// assumes_unsharded_collection,
+// featureFlagFLE2CleanupCommand
+// ]
+//
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js";
+
+if (!buildInfo().modules.includes("enterprise")) {
+ jsTestLog("Skipping test as it requires the enterprise module");
+ quit();
+}
+
+const dbName = "qetestdb";
+const collName = "qetestcoll";
+const initialConn = db.getMongo();
+const testDb = db.getSiblingDB(dbName);
+const placeholderBinData0 = BinData(0, "WMdGo/tcDkE4UL6bgGYTN6oKFitgLXvhyhB9sbKxprk=");
+const placeholderBinData6 = BinData(6, "WMdGo/tcDkE4UL6bgGYTN6oKFitgLXvhyhB9sbKxprk=");
+const placeholderOID = ObjectId();
+
+const origCanonicalizeEventForTesting = canonicalizeEventForTesting;
+
+function replaceRandomDataWithPlaceholders(event) {
+ for (let field in event) {
+ if (!Object.prototype.hasOwnProperty.call(event, field)) {
+ continue;
+ }
+ if (event[field] instanceof BinData) {
+ if (event[field].subtype() === 6) {
+ event[field] = placeholderBinData6;
+ } else if (event[field].subtype() === 0) {
+ event[field] = placeholderBinData0;
+ }
+ } else if (event[field] instanceof ObjectId) {
+ event[field] = placeholderOID;
+ } else if (typeof event[field] === "object") {
+ replaceRandomDataWithPlaceholders(event[field]);
+ }
+ }
+}
+canonicalizeEventForTesting = function(event, expected) {
+ if (event.hasOwnProperty("fullDocument") || event.hasOwnProperty("documentKey")) {
+ replaceRandomDataWithPlaceholders(event);
+ }
+ return origCanonicalizeEventForTesting(event, expected);
+};
+
+testDb.dropDatabase();
+
+let encryptedClient = new EncryptedClient(initialConn, dbName);
+assert.commandWorked(encryptedClient.createEncryptionCollection(collName, {
+ encryptedFields: {
+ "fields": [
+ {
+ "path": "first",
+ "bsonType": "string",
+ /* contention: 0 is required for the cleanup tests to work */
+ "queries": {"queryType": "equality", "contention": 0}
+ },
+ ]
+ }
+}));
+
+const cst = new ChangeStreamTest(testDb);
+const ecoll = encryptedClient.getDB()[collName];
+const [escName, ecocName] = (() => {
+ let names = encryptedClient.getStateCollectionNamespaces(collName);
+ return [names.esc, names.ecoc];
+})();
+
+const escInsertChange = {
+ documentKey: {_id: placeholderBinData0},
+ fullDocument: {_id: placeholderBinData0},
+ ns: {db: dbName, coll: escName},
+ operationType: "insert",
+};
+const ecocInsertChange = {
+ documentKey: {_id: placeholderOID},
+ fullDocument: {_id: placeholderOID, fieldName: "first", value: placeholderBinData0},
+ ns: {db: dbName, coll: ecocName},
+ operationType: "insert",
+};
+function expectedEDCInsertChange(id, last, implicitShardKey = undefined) {
+ let expected = {
+ documentKey: {_id: id},
+ fullDocument: {
+ _id: id,
+ first: placeholderBinData6,
+ last: last,
+ "__safeContent__": [placeholderBinData0]
+ },
+ ns: {db: dbName, coll: collName},
+ operationType: "insert",
+ };
+ if (encryptedClient.useImplicitSharding && implicitShardKey) {
+ expected.documentKey = Object.assign(expected.documentKey, implicitShardKey);
+ }
+ return expected;
+}
+
+let expectedChange = undefined;
+const testValues = [
+ ["frodo", "baggins"],
+ ["merry", "brandybuck"],
+ ["pippin", "took"],
+ ["sam", "gamgee"],
+ ["rosie", "gamgee"],
+ ["paladin", "took"],
+];
+
+let cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+let cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+assert.eq(0, cursordb.firstBatch.length, "Cursor had changes: " + tojson(cursordb));
+
+jsTestLog("Testing single insert");
+{
+ assert.commandWorked(ecoll.insert({_id: 0, first: "frodo", last: "baggins"}));
+ expectedChange = expectedEDCInsertChange(0, "baggins", {last: "baggins"});
+
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]});
+ cst.assertNextChangesEqualUnordered(
+ {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]});
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing second insert");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+ assert.commandWorked(ecoll.insert({_id: 1, first: "merry", last: "brandybuck"}));
+ expectedChange = expectedEDCInsertChange(1, "brandybuck", {last: "brandybuck"});
+
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]});
+ cst.assertNextChangesEqualUnordered(
+ {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]});
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing replacement update");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+ assert.commandWorked(
+ ecoll.update({last: "baggins"}, {first: "pippin", last: "took", location: "shire"}));
+ expectedChange = expectedEDCInsertChange(0, "took", {last: "baggins"});
+ expectedChange.operationType = "replace";
+ expectedChange.fullDocument.location = "shire";
+
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]});
+ cst.assertNextChangesEqualUnordered(
+ {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]});
+}
+
+jsTestLog("Testing upsert");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+ assert.commandWorked(
+ ecoll.update({last: "gamgee"}, {_id: 2, first: "sam", last: "gamgee"}, {upsert: true}));
+
+ expectedChange = expectedEDCInsertChange(2, "gamgee", {last: "gamgee"});
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]});
+ cst.assertNextChangesEqualUnordered(
+ {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]});
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing modification update");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+ assert.commandWorked(ecoll.update({last: "gamgee"}, {$set: {first: "rosie"}}));
+ expectedChange = {
+ documentKey: {_id: 2},
+ ns: {db: dbName, coll: collName},
+ operationType: "update",
+ updateDescription: {
+ removedFields: [],
+ updatedFields: {first: placeholderBinData6, "__safeContent__.1": placeholderBinData0},
+ truncatedArrays: []
+ },
+ };
+ let safeContentPullChange = {
+ documentKey: {_id: 2},
+ ns: {db: dbName, coll: collName},
+ operationType: "update",
+ updateDescription: {
+ removedFields: [],
+ updatedFields: {"__safeContent__": [placeholderBinData0]},
+ truncatedArrays: []
+ },
+ };
+ if (encryptedClient.useImplicitSharding) {
+ expectedChange.documentKey.last = "gamgee";
+ safeContentPullChange.documentKey.last = "gamgee";
+ }
+
+ cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: [expectedChange, safeContentPullChange]});
+ cst.assertNextChangesEqualUnordered({
+ cursor: cursordb,
+ expectedChanges: [expectedChange, escInsertChange, ecocInsertChange, safeContentPullChange]
+ });
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing findAndModify");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+ assert.commandWorked(ecoll.runCommand({
+ findAndModify: ecoll.getName(),
+ query: {last: "took"},
+ update: {$set: {first: "paladin"}, $unset: {location: ""}},
+ }));
+ expectedChange = {
+ documentKey: {_id: 0},
+ ns: {db: dbName, coll: collName},
+ operationType: "update",
+ updateDescription: {
+ removedFields: ["location"],
+ updatedFields: {first: placeholderBinData6, "__safeContent__.1": placeholderBinData0},
+ truncatedArrays: []
+ },
+ };
+ let safeContentPullChange = {
+ documentKey: {_id: 0},
+ ns: {db: dbName, coll: collName},
+ operationType: "update",
+ updateDescription: {
+ removedFields: [],
+ updatedFields: {"__safeContent__": [placeholderBinData0]},
+ truncatedArrays: []
+ },
+ };
+ if (encryptedClient.useImplicitSharding) {
+ expectedChange.documentKey.last = "took";
+ safeContentPullChange.documentKey.last = "took";
+ }
+
+ cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: [expectedChange, safeContentPullChange]});
+ cst.assertNextChangesEqualUnordered({
+ cursor: cursordb,
+ expectedChanges: [expectedChange, escInsertChange, ecocInsertChange, safeContentPullChange]
+ });
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing delete");
+{
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+ assert.commandWorked(ecoll.remove({last: "gamgee"}));
+ expectedChange = {
+ documentKey: {_id: 2},
+ ns: {db: dbName, coll: collName},
+ operationType: "delete",
+ };
+ if (encryptedClient.useImplicitSharding) {
+ expectedChange.documentKey.last = "gamgee";
+ }
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]});
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [expectedChange]});
+ cst.assertNoChange(cursor);
+ cst.assertNoChange(cursordb);
+}
+
+const ecocRenameChange = {
+ operationType: "rename",
+ ns: {db: dbName, coll: ecocName},
+ to: {db: dbName, coll: ecocName + ".compact"},
+};
+const escDeleteChange = {
+ operationType: "delete",
+ ns: {db: dbName, coll: escName},
+ documentKey: {_id: placeholderBinData0},
+};
+const escDeletesDropChange = {
+ operationType: "drop",
+ ns: {db: dbName, coll: escName + ".deletes"},
+};
+const ecocCompactDropChange = {
+ operationType: "drop",
+ ns: {db: dbName, coll: ecocName + ".compact"},
+};
+
+jsTestLog("Testing compact");
+{
+ // all non-anchors will be deleted by compact
+ const deleteCount = testDb[escName].countDocuments({value: {$exists: false}});
+ const numUniqueValues = testValues.length;
+
+ encryptedClient.assertEncryptedCollectionCounts(collName, 2, numUniqueValues, numUniqueValues);
+
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+ assert.commandWorked(ecoll.compact());
+ encryptedClient.assertEncryptedCollectionCounts(collName, 2, numUniqueValues, 0);
+ const anchorCount = testDb[escName].countDocuments({value: {$exists: true}});
+ const nonAnchorCount = testDb[escName].countDocuments({value: {$exists: false}});
+ assert.eq(anchorCount, numUniqueValues);
+ assert.eq(nonAnchorCount, 0);
+
+ cst.assertNoChange(cursor);
+
+ escInsertChange.fullDocument.value = placeholderBinData0;
+ // temp ecoc rename
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocRenameChange]});
+ // normal anchor inserts
+ for (let i = 0; i < numUniqueValues; i++) {
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escInsertChange]});
+ }
+ // non-anchor deletes
+ for (let i = 0; i < deleteCount; i++) {
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeleteChange]});
+ }
+ // temp ecoc drop
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocCompactDropChange]});
+ cst.assertNoChange(cursordb);
+}
+
+jsTestLog("Testing cleanup");
+{
+ // insert new documents for each test value, so the ECOC & ESC have documents to clean up
+ for (let val of testValues) {
+ assert.commandWorked(ecoll.insert({first: val[0], last: val[1]}));
+ }
+ // ESC doesn't have null anchors yet, so the total delete count == ESC count before cleanup
+ const deleteCount = testDb[escName].countDocuments({});
+ const nonAnchorCount = testDb[escName].countDocuments({value: {$exists: false}});
+ const anchorCount = deleteCount - nonAnchorCount;
+ const numUniqueValues = testValues.length;
+
+ encryptedClient.assertEncryptedCollectionCounts(
+ collName, 2 + numUniqueValues, numUniqueValues * 2, numUniqueValues);
+ assert.eq(anchorCount, numUniqueValues);
+ assert.eq(nonAnchorCount, numUniqueValues);
+
+ cursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]});
+ cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+ assert.commandWorked(ecoll.cleanup());
+ encryptedClient.assertEncryptedCollectionCounts(
+ collName, 2 + numUniqueValues, numUniqueValues, 0);
+ encryptedClient.assertESCNonAnchorCount(collName, 0);
+
+ cst.assertNoChange(cursor);
+
+ const escDeletesInsertChange = {
+ documentKey: {_id: placeholderBinData0},
+ fullDocument: {_id: placeholderBinData0},
+ ns: {db: dbName, coll: escName + ".deletes"},
+ operationType: "insert",
+ };
+
+ // temp ecoc rename
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocRenameChange]});
+ // each null anchor insert is followed by a single insert to esc.deletes
+ escInsertChange.fullDocument.value = placeholderBinData0;
+ for (let i = 0; i < anchorCount; i++) {
+ cst.assertNextChangesEqual(
+ {cursor: cursordb, expectedChanges: [escInsertChange, escDeletesInsertChange]});
+ }
+ // non-anchors and regular anchors are deleted from ESC
+ for (let i = 0; i < deleteCount; i++) {
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeleteChange]});
+ }
+ // temp esc.deletes drop
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeletesDropChange]});
+ // temp ecoc.compact drop
+ cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocCompactDropChange]});
+ cst.assertNoChange(cursordb);
+}
+
+cst.cleanUp();
+
+canonicalizeEventForTesting = origCanonicalizeEventForTesting;
diff --git a/jstests/change_streams/refine_collection_shard_key_event.js b/jstests/change_streams/refine_collection_shard_key_event.js
index 5acde86c27bea..330caa302ea58 100644
--- a/jstests/change_streams/refine_collection_shard_key_event.js
+++ b/jstests/change_streams/refine_collection_shard_key_event.js
@@ -2,13 +2,11 @@
* Test that change streams returns refineCollectionShardKey events.
*
* @tags: [
- * requires_fcv_61,
* requires_sharding,
* uses_change_streams,
* change_stream_does_not_expect_txns,
* assumes_unsharded_collection,
* assumes_read_preference_unchanged,
- * featureFlagChangeStreamsFurtherEnrichedEvents
* ]
*/
diff --git a/jstests/change_streams/reshard_collection_event.js b/jstests/change_streams/reshard_collection_event.js
index f55e081322cd8..6992a66c63850 100644
--- a/jstests/change_streams/reshard_collection_event.js
+++ b/jstests/change_streams/reshard_collection_event.js
@@ -2,13 +2,11 @@
* Test that change streams returns reshardCollection events.
*
* @tags: [
- * requires_fcv_61,
* requires_sharding,
* uses_change_streams,
* change_stream_does_not_expect_txns,
* assumes_unsharded_collection,
* assumes_read_preference_unchanged,
- * featureFlagChangeStreamsFurtherEnrichedEvents
* ]
*/
diff --git a/jstests/change_streams/resume_from_high_water_mark_token.js b/jstests/change_streams/resume_from_high_water_mark_token.js
index e0004c2dd2589..4d5c59eced077 100644
--- a/jstests/change_streams/resume_from_high_water_mark_token.js
+++ b/jstests/change_streams/resume_from_high_water_mark_token.js
@@ -263,4 +263,4 @@ assert.soon(() => {
return csCursor.hasNext() && csCursor.next().operationType === "invalidate";
});
csCursor.close();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js
index bb13609746795..f43f5fa655c19 100644
--- a/jstests/change_streams/shell_helper.js
+++ b/jstests/change_streams/shell_helper.js
@@ -124,8 +124,7 @@ checkNextChange(changeStreamCursor, expected);
jsTestLog("Testing watch() with batchSize");
// Only test mongod because mongos uses batch size 0 for aggregate commands internally to
// establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992.
-const isMongos = FixtureHelpers.isMongos(db);
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
// Increase a field by 5 times and verify the batch size is respected.
for (let i = 0; i < 5; i++) {
assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
diff --git a/jstests/change_streams/show_expanded_events.js b/jstests/change_streams/show_expanded_events.js
index b3ae9c6faafca..cba41827548c5 100644
--- a/jstests/change_streams/show_expanded_events.js
+++ b/jstests/change_streams/show_expanded_events.js
@@ -2,11 +2,9 @@
* Tests the behavior of change streams in the presence of 'showExpandedEvents' flag.
*
* @tags: [
- * requires_fcv_61,
* # The test assumes certain ordering of the events. The chunk migrations on a sharded collection
* # could break the test.
* assumes_unsharded_collection,
- * featureFlagChangeStreamsFurtherEnrichedEvents,
* ]
*/
(function() {
diff --git a/jstests/change_streams/show_resharding_system_events.js b/jstests/change_streams/show_resharding_system_events.js
index d931500746a3f..698a890db49ec 100644
--- a/jstests/change_streams/show_resharding_system_events.js
+++ b/jstests/change_streams/show_resharding_system_events.js
@@ -4,8 +4,6 @@
* operate in a sharded cluster.
*
* @tags: [
- * requires_fcv_61,
- * featureFlagChangeStreamsFurtherEnrichedEvents,
* requires_sharding,
* uses_change_streams,
* change_stream_does_not_expect_txns,
@@ -13,9 +11,7 @@
* assumes_read_preference_unchanged,
* ]
*/
-(function() {
-"use strict";
-
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load('jstests/libs/change_stream_util.js'); // For 'assertChangeStreamEventEq'.
// Create a single-shard cluster for this test.
@@ -74,7 +70,7 @@ const origNs = {
db: testDB.getName(),
coll: testColl.getName()
};
-const expectedReshardingEvents = [
+let expectedReshardingEvents = [
{ns: reshardingNs, collectionUUID: newUUID, operationType: "create"},
{
ns: reshardingNs,
@@ -118,6 +114,52 @@ const expectedReshardingEvents = [
},
];
+if (FeatureFlagUtil.isEnabled(st.s, "ReshardingImprovements")) {
+ expectedReshardingEvents = [
+ {ns: reshardingNs, collectionUUID: newUUID, operationType: "create"},
+ {
+ ns: reshardingNs,
+ collectionUUID: newUUID,
+ operationType: "shardCollection",
+ operationDescription: {shardKey: {a: 1}}
+ },
+ {
+ ns: reshardingNs,
+ collectionUUID: newUUID,
+ operationType: "insert",
+ fullDocument: {_id: 0, a: 0},
+ documentKey: {a: 0, _id: 0}
+ },
+ {
+ ns: reshardingNs,
+ collectionUUID: newUUID,
+ operationType: "insert",
+ fullDocument: {_id: 1, a: 1},
+ documentKey: {a: 1, _id: 1}
+ },
+ {
+ ns: reshardingNs,
+ collectionUUID: newUUID,
+ operationType: "createIndexes",
+ operationDescription: {indexes: [{v: 2, key: {a: 1}, name: "a_1"}]}
+ },
+ {
+ ns: origNs,
+ collectionUUID: oldUUID,
+ operationType: "reshardCollection",
+ operationDescription:
+ {reshardUUID: newUUID, shardKey: {a: 1}, oldShardKey: {_id: 1}, unique: false}
+ },
+ {
+ ns: origNs,
+ collectionUUID: newUUID,
+ operationType: "insert",
+ fullDocument: {_id: 2, a: 2},
+ documentKey: {a: 2, _id: 2}
+ },
+ ];
+}
+
// Helper to confirm the sequence of events observed in the change stream.
function assertChangeStreamEventSequence(csConfig, expectedEvents) {
// Open a change stream on the test DB using the given configuration.
@@ -141,4 +183,3 @@ const nonSystemEvents =
assertChangeStreamEventSequence({showSystemEvents: false}, nonSystemEvents);
st.stop();
-}());
diff --git a/jstests/change_streams/timeseries.js b/jstests/change_streams/timeseries.js
index a279bfc14ad04..e285e26a92769 100644
--- a/jstests/change_streams/timeseries.js
+++ b/jstests/change_streams/timeseries.js
@@ -7,11 +7,8 @@
* requires_fcv_61,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
let testDB = db.getSiblingDB(jsTestName());
testDB.dropDatabase();
@@ -43,12 +40,12 @@ let curNoEvents = testDB.watch([], {showExpandedEvents: true});
assert.commandWorked(testDB.createCollection(
jsTestName(),
- {timeseries: {timeField: "ts", metaField: "meta"}})); // on buckets ns and view ns
-coll.createIndex({ts: 1, "meta.b": 1}, {name: "dropMe"}); // on buckets ns
-coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns
-coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns
-coll.update({"meta.a": 1}, {$set: {"meta.b": 2}}); // on buckets ns
-coll.remove({"meta.a": 1}); // on buckets ns
+ {timeseries: {timeField: "ts", metaField: "meta"}})); // on buckets ns and view ns
+coll.createIndex({ts: 1, "meta.b": 1}, {name: "dropMe"}); // on buckets ns
+coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns
+coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns
+coll.update({"meta.a": 1}, {$set: {"meta.b": 2}}, {multi: true}); // on buckets ns
+coll.remove({"meta.a": 1}); // on buckets ns
// collMod granularity. on both buckets ns and view ns
assert.commandWorked(testDB.runCommand({collMod: collName, timeseries: {granularity: "hours"}}));
// collMod expiration. just on buckets ns
@@ -163,6 +160,16 @@ let expectedChanges = [
{"data._id.1": ["data", "_id", "1"], "data.ts.1": ["data", "ts", "1"]}
}
},
+ {
+ "operationType": "update",
+ "ns": {"db": dbName, "coll": bucketsCollName},
+ "updateDescription": {
+ "updatedFields": {"meta.b": 2},
+ "removedFields": [],
+ "truncatedArrays": [],
+ "disambiguatedPaths": {}
+ }
+ },
{"operationType": "delete", "ns": {"db": dbName, "coll": bucketsCollName}},
{
"operationType": "modify",
@@ -313,5 +320,4 @@ let curWithEventsNormal = new DBCommandCursor(testDB, {ok: 1, cursor: curWithEve
assertNoMoreBucketsEvents(curWithEventsNormal);
// No events cursor should have no system.buckets events.
-assertNoMoreBucketsEvents(curNoEvents);
-}());
+assertNoMoreBucketsEvents(curNoEvents);
\ No newline at end of file
diff --git a/jstests/change_streams/whole_cluster_metadata_notifications.js b/jstests/change_streams/whole_cluster_metadata_notifications.js
index e960affc2ef8c..02164b0b68259 100644
--- a/jstests/change_streams/whole_cluster_metadata_notifications.js
+++ b/jstests/change_streams/whole_cluster_metadata_notifications.js
@@ -12,7 +12,8 @@ load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Col
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
// Define two databases. We will conduct our tests by creating one collection in each.
-const testDB1 = db.getSiblingDB(jsTestName()), testDB2 = db.getSiblingDB(jsTestName() + "_other");
+const testDB1 = db.getSiblingDB("whole_cluster_metadata"),
+ testDB2 = db.getSiblingDB("whole_cluster_metadata_other");
const adminDB = db.getSiblingDB("admin");
assert.commandWorked(testDB1.dropDatabase());
@@ -153,7 +154,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) {
// passthrough suites since we cannot guarantee the primary shard of the target database
// and renameCollection requires the source and destination to be on the same shard.
if (!FixtureHelpers.isMongos(testDB)) {
- const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
+ const otherDB = testDB.getSiblingDB(testDB.getName() + "_target");
// Ensure the target database exists.
const collOtherDB = assertDropAndRecreateCollection(otherDB, "test");
assertDropCollection(otherDB, collOtherDB.getName());
diff --git a/jstests/change_streams/whole_db_metadata_notifications.js b/jstests/change_streams/whole_db_metadata_notifications.js
index 35b75cbc05ca6..29fe3507c7bac 100644
--- a/jstests/change_streams/whole_db_metadata_notifications.js
+++ b/jstests/change_streams/whole_db_metadata_notifications.js
@@ -10,8 +10,10 @@ load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectio
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-const testDB = db.getSiblingDB(jsTestName());
+const testDB = db.getSiblingDB("whole_db_metadata_notifs");
+const otherDB = testDB.getSiblingDB("whole_db_metadata_notifs_other");
testDB.dropDatabase();
+otherDB.dropDatabase();
let cst = new ChangeStreamTest(testDB);
// Write a document to the collection and test that the change stream returns it
@@ -55,7 +57,6 @@ assert.commandWorked(testDB.runCommand(
{aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}}));
// Test that invalidation entries for other databases are filtered out.
-const otherDB = testDB.getSiblingDB(jsTestName() + "other");
const otherDBColl = otherDB[collName + "_other"];
assert.commandWorked(otherDBColl.insert({_id: 0}));
diff --git a/jstests/client_encrypt/fle_auto_decrypt.js b/jstests/client_encrypt/fle_auto_decrypt.js
index 650125e0fb7da..60c20b899d27f 100644
--- a/jstests/client_encrypt/fle_auto_decrypt.js
+++ b/jstests/client_encrypt/fle_auto_decrypt.js
@@ -71,4 +71,4 @@ const clientSideFLEOptionsBypassAutoEncrypt = {
test(conn, clientSideFLEOptionsBypassAutoEncrypt, keyId);
MongoRunner.stopMongod(conn);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/client_encrypt/fle_key_faults.js b/jstests/client_encrypt/fle_key_faults.js
index 204ee277ec5e4..c57f5ff372b22 100644
--- a/jstests/client_encrypt/fle_key_faults.js
+++ b/jstests/client_encrypt/fle_key_faults.js
@@ -82,4 +82,4 @@ testFaults((keyId, shell) => {
});
MongoRunner.stopMongod(conn);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/client_encrypt/fle_keys.js b/jstests/client_encrypt/fle_keys.js
index 93fb00004784b..75eaa0889b876 100644
--- a/jstests/client_encrypt/fle_keys.js
+++ b/jstests/client_encrypt/fle_keys.js
@@ -75,4 +75,4 @@ keyVault.createKey("local", ['mongoKey3']);
assert.eq(3, keyVault.getKeys().itcount());
MongoRunner.stopMongod(conn);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/concurrency/fsm_example.js b/jstests/concurrency/fsm_example.js
index fb012462a158f..d4cd9683fdaf1 100644
--- a/jstests/concurrency/fsm_example.js
+++ b/jstests/concurrency/fsm_example.js
@@ -6,7 +6,7 @@
* Includes documentation of each property on $config.
* Serves as a template for new workloads.
*/
-var $config = (function() {
+export const $config = (function() {
// 'data' is passed (copied) to each of the worker threads.
var data = {};
diff --git a/jstests/concurrency/fsm_example_inheritance.js b/jstests/concurrency/fsm_example_inheritance.js
index dd6364b2d87a7..7352980c5488d 100644
--- a/jstests/concurrency/fsm_example_inheritance.js
+++ b/jstests/concurrency/fsm_example_inheritance.js
@@ -1,10 +1,8 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_example.js'); // for $config
+import {$config as $baseConfig} from 'jstests/concurrency/fsm_example.js';
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
// extendWorkload takes a $config object and a callback, and returns an extended $config object.
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// In the callback, $super is the base workload definition we're
// extending,
// and $config is the extended workload definition we're creating.
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 44fe572bff077..a78f6a8c76791 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -217,7 +217,7 @@ var Cluster = function(options) {
i = 0;
while (st.rs(i)) {
- var rs = st.rs(i++);
+ const rs = st.rs(i++);
this._addReplicaSetConns(rs);
replSets.push(rs);
}
@@ -459,6 +459,12 @@ var Cluster = function(options) {
return cluster;
};
+ this.getReplicaSets = function getReplicaSets() {
+ assert(initialized, 'cluster must be initialized first');
+ assert(this.isReplication() || this.isSharded());
+ return replSets;
+ };
+
this.isBalancerEnabled = function isBalancerEnabled() {
return this.isSharded() && options.sharded.enableBalancer;
};
@@ -594,10 +600,10 @@ var Cluster = function(options) {
};
/*
- * Returns true if this cluster has a catalog shard.
- * Catalog shard always have shard ID equal to "config".
+ * Returns true if this cluster has a config shard.
+     * A config shard always has the shard ID "config".
*/
- this.hasCatalogShard = function hasCatalogShard() {
+ this.hasConfigShard = function hasConfigShard() {
if (!this.isSharded()) {
return false;
}
diff --git a/jstests/concurrency/fsm_libs/extend_workload.js b/jstests/concurrency/fsm_libs/extend_workload.js
index 84d094cb36e00..ced5511d06520 100644
--- a/jstests/concurrency/fsm_libs/extend_workload.js
+++ b/jstests/concurrency/fsm_libs/extend_workload.js
@@ -1,5 +1,3 @@
-'use strict';
-
load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
/**
@@ -13,7 +11,7 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
* return $config;
* });
*/
-function extendWorkload($config, callback) {
+export function extendWorkload($config, callback) {
assert.eq(2,
arguments.length,
'extendWorkload must be called with 2 arguments: $config and callback');
diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js
index 2ca66fee68b45..bf3461cda6347 100644
--- a/jstests/concurrency/fsm_libs/resmoke_runner.js
+++ b/jstests/concurrency/fsm_libs/resmoke_runner.js
@@ -1,4 +1,3 @@
-(function() {
'use strict';
load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals
@@ -28,8 +27,8 @@ function cleanupWorkload(workload, context, cluster, errors, header) {
return true;
}
-function runWorkloads(workloads,
- {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
+async function runWorkloads(workloads,
+ {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) {
assert.gt(workloads.length, 0, 'need at least one workload to run');
const executionMode = {serial: true};
@@ -53,7 +52,7 @@ function runWorkloads(workloads,
const context = {};
const applyMultipliers = true;
- loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
+ await loadWorkloadContext(workloads, context, executionOptions, applyMultipliers);
// Constructing a Cluster instance calls its internal validateClusterOptions() function,
// which fills in any properties that aren't explicitly present in 'clusterOptions'. We do
@@ -287,5 +286,4 @@ if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) {
executionOptions.sessionOptions = sessionOptions;
}
-runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
-})();
+await runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions});
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 272c58b1fd704..b9001815361fb 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -400,16 +400,17 @@ var runner = (function() {
config.data, 'threadCount', {enumerable: true, value: config.threadCount});
}
- function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) {
- workloads.forEach(function(workload) {
- load(workload); // for $config
+ async function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) {
+ for (const workload of workloads) {
+ const {$config} = await import(workload);
assert.neq('undefined', typeof $config, '$config was not defined by ' + workload);
+ print(tojson($config));
context[workload] = {config: parseConfig($config)};
if (applyMultipliers) {
context[workload].config.iterations *= executionOptions.iterationMultiplier;
context[workload].config.threadCount *= executionOptions.threadMultiplier;
}
- });
+ }
}
function printWorkloadSchedule(schedule) {
@@ -591,7 +592,7 @@ var runner = (function() {
'after workload-group teardown and data clean-up');
}
- function runWorkloads(
+ async function runWorkloads(
workloads, clusterOptions, executionMode, executionOptions, cleanupOptions) {
assert.gt(workloads.length, 0, 'need at least one workload to run');
@@ -625,7 +626,8 @@ var runner = (function() {
globalAssertLevel = assertLevel;
var context = {};
- loadWorkloadContext(workloads, context, executionOptions, true /* applyMultipliers */);
+ await loadWorkloadContext(
+ workloads, context, executionOptions, true /* applyMultipliers */);
var threadMgr = new ThreadManager(clusterOptions, executionMode);
var cluster = new Cluster(clusterOptions);
@@ -694,32 +696,34 @@ var runner = (function() {
}
return {
- serial: function serial(workloads, clusterOptions, executionOptions, cleanupOptions) {
+ serial: async function serial(workloads, clusterOptions, executionOptions, cleanupOptions) {
clusterOptions = clusterOptions || {};
executionOptions = executionOptions || {};
cleanupOptions = cleanupOptions || {};
- runWorkloads(
+ await runWorkloads(
workloads, clusterOptions, {serial: true}, executionOptions, cleanupOptions);
},
- parallel: function parallel(workloads, clusterOptions, executionOptions, cleanupOptions) {
- clusterOptions = clusterOptions || {};
- executionOptions = executionOptions || {};
- cleanupOptions = cleanupOptions || {};
-
- runWorkloads(
- workloads, clusterOptions, {parallel: true}, executionOptions, cleanupOptions);
- },
-
- composed: function composed(workloads, clusterOptions, executionOptions, cleanupOptions) {
- clusterOptions = clusterOptions || {};
- executionOptions = executionOptions || {};
- cleanupOptions = cleanupOptions || {};
-
- runWorkloads(
- workloads, clusterOptions, {composed: true}, executionOptions, cleanupOptions);
- },
+ parallel:
+ async function parallel(workloads, clusterOptions, executionOptions, cleanupOptions) {
+ clusterOptions = clusterOptions || {};
+ executionOptions = executionOptions || {};
+ cleanupOptions = cleanupOptions || {};
+
+ await runWorkloads(
+ workloads, clusterOptions, {parallel: true}, executionOptions, cleanupOptions);
+ },
+
+ composed:
+ async function composed(workloads, clusterOptions, executionOptions, cleanupOptions) {
+ clusterOptions = clusterOptions || {};
+ executionOptions = executionOptions || {};
+ cleanupOptions = cleanupOptions || {};
+
+ await runWorkloads(
+ workloads, clusterOptions, {composed: true}, executionOptions, cleanupOptions);
+ },
internals: {
validateExecutionOptions,
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 3a4623f317d59..deeeb83e14191 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -20,7 +20,7 @@ var workerThread = (function() {
// args.errorLatch = CountDownLatch instance that threads count down when they error
// args.sessionOptions = the options to start a session with
// run = callback that takes a map of workloads to their associated $config
- function main(workloads, args, run) {
+ async function main(workloads, args, run) {
var myDB;
var configs = {};
var connectionString = 'mongodb://' + args.host + '/?appName=tid:' + args.tid;
@@ -172,8 +172,8 @@ var workerThread = (function() {
load('jstests/libs/override_methods/set_read_and_write_concerns.js');
}
- workloads.forEach(function(workload) {
- load(workload); // for $config
+ for (const workload of workloads) {
+ const {$config} = await import(workload);
var config = parseConfig($config); // to normalize
// Copy any modifications that were made to $config.data
@@ -213,7 +213,7 @@ var workerThread = (function() {
tid: args.tid,
transitions: config.transitions
};
- });
+ }
args.latch.countDown();
diff --git a/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js b/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js
index 5861d5dc48b21..4be2fa55007a3 100644
--- a/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js
+++ b/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js
@@ -10,17 +10,17 @@ load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
// In-memory representation of the documents owned by this thread for all given collections. Used to
// verify the expected documents are deleted in the collection.
let expectedDocuments = {};
-
-// The number of "groups" each document within those assigned to a thread can belong to for a given
-// collection. Entire groups will be deleted at once by the multiDelete state function, so this is
-// effectively the number of times that stage can be meaningfully run per thread.
-const numGroupsWithinThread = $config.data.partitionSize / 5;
let nextGroupId = {};
/**
* Returns the next groupId for the multiDelete state function to use.
*/
-function getNextGroupIdForDelete(collName) {
+function getNextGroupIdForDelete(collName, partitionSize) {
+ // The number of "groups" each document within those assigned to a thread can belong to for a
+ // given collection. Entire groups will be deleted at once by the multiDelete state function, so
+ // this is effectively the number of times that stage can be meaningfully run per thread.
+ const numGroupsWithinThread = partitionSize / 5;
+
const nextId = nextGroupId[collName];
nextGroupId[collName] = (nextGroupId[collName] + 1) % numGroupsWithinThread;
return nextId;
@@ -65,7 +65,7 @@ function exactIdDelete(db, collName, session) {
* Sends a multi=true delete without the shard key that targets all documents assigned to this
* thread, which should be sent to all shards.
*/
-function multiDelete(db, collName, session, tid) {
+function multiDelete(db, collName, session, tid, partitionSize) {
// If no documents remain in our partition, there is nothing to do.
if (!expectedDocuments[collName].length) {
print('This thread owns no more documents for collection ' + db[collName] +
@@ -74,7 +74,7 @@ function multiDelete(db, collName, session, tid) {
}
// Delete a group of documents within those assigned to this thread.
- const groupIdToDelete = getNextGroupIdForDelete(collName);
+ const groupIdToDelete = getNextGroupIdForDelete(collName, partitionSize);
const collection = session.getDatabase(db.getName()).getCollection(collName);
withTxnAndAutoRetry(session, () => {
@@ -119,7 +119,12 @@ function verifyDocuments(db, collName, tid) {
* Gives each document assigned to this thread a group id for multi=true deletes, and loads each
* document into memory.
*/
-function initDeleteInTransactionStates(db, collName, tid) {
+function initDeleteInTransactionStates(db, collName, tid, partitionSize) {
+ // The number of "groups" each document within those assigned to a thread can belong to for a
+ // given collection. Entire groups will be deleted at once by the multiDelete state function, so
+ // this is effectively the number of times that stage can be meaningfully run per thread.
+ const numGroupsWithinThread = partitionSize / 5;
+
// Assign each document owned by this thread to a different "group" so they can be multi
// deleted by group later.
let nextGroupIdForInit = nextGroupId[collName] = 0;
diff --git a/jstests/concurrency/fsm_workload_helpers/kill_session.js b/jstests/concurrency/fsm_workload_helpers/kill_session.js
index f440f25d8d55a..004db48ecacd1 100644
--- a/jstests/concurrency/fsm_workload_helpers/kill_session.js
+++ b/jstests/concurrency/fsm_workload_helpers/kill_session.js
@@ -23,7 +23,10 @@ function killSession(db, collName) {
ourSessionWasKilled = true;
continue;
} else {
- assertAlways.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
+ assertAlways.commandFailedWithCode(
+ res,
+ [ErrorCodes.DuplicateKey, ErrorCodes.WriteConcernFailed],
+ 'unexpected error code: ' + res.code + ': ' + res.message);
}
const sessionToKill = db.getSiblingDB("config").system.sessions.aggregate([
diff --git a/jstests/concurrency/fsm_workloads/CRUD_and_commands.js b/jstests/concurrency/fsm_workloads/CRUD_and_commands.js
index 047c22bb6829c..95768079b8f95 100644
--- a/jstests/concurrency/fsm_workloads/CRUD_and_commands.js
+++ b/jstests/concurrency/fsm_workloads/CRUD_and_commands.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform CRUD operations, some of which may implicitly create collections, in parallel with
* collection-dropping operations.
@@ -7,14 +5,13 @@
* @tags: [
* ]
*/
-var $config = (function() {
- const data = {numIds: 10};
+export const $config = (function() {
+ const data = {numIds: 10, docValue: "mydoc"};
const states = {
init: function init(db, collName) {
this.session = db.getMongo().startSession({causalConsistency: true});
this.sessionDb = this.session.getDatabase(db.getName());
- this.docValue = "mydoc";
},
insertDocs: function insertDocs(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js b/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js
index 890225b15fed7..148d2db19e1a9 100644
--- a/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js
+++ b/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform CRUD operations, some of which may implicitly create collections. Also perform index
* creations which may implicitly create collections. Performs these in parallel with collection-
@@ -8,13 +6,13 @@
* @tags: [
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/CRUD_and_commands.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/CRUD_and_commands.js";
// TODO(SERVER-46971) combine with CRUD_and_commands.js and remove `local` readConcern.
TestData.defaultTransactionReadConcernLevel = "local";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
const origStates = Object.keys($config.states);
$config.states = Object.extend({
createIndex: function createIndex(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js b/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js
index 351bd0bfbb1ec..fce942e3fa4b8 100644
--- a/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js
+++ b/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform CRUD operations in parallel on a clustered collection. Disallows dropping the collection
* to prevent implicit creation of a non-clustered collection.
@@ -9,10 +7,10 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/CRUD_and_commands.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/CRUD_and_commands.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Exclude dropCollection to prevent implicit collection creation of a non-clustered
// collection.
const newStates = $super.states;
diff --git a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
index 36bddb4c7d6f0..72f18b0eff507 100644
--- a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
+++ b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Transactions with local (and majority) readConcern perform untimestamped reads and do not check
* the min visible snapshot for collections, so they can access collections whose catalog
@@ -14,7 +12,7 @@
* @tags: [uses_transactions, requires_replication]
*/
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
function init(db, collName) {
this.session = db.getMongo().startSession();
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index 0b3ad43241855..9495c6cc53518 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* agg_base.js
*
* Base workload for aggregation. Inserts a bunch of documents in its setup,
* then each thread does an aggregation with an empty $match.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
numDocs: 1000,
// Use 12KB documents by default. This number is useful because 12,000 documents each of
diff --git a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
index e503873619e5d..71bfbb3bcadea 100644
--- a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+++ b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* agg_graph_lookup.js
*
* Runs a $graphLookup aggregation simultaneously with updates.
*/
-var $config = (function() {
+export const $config = (function() {
const data = {numDocs: 1000};
const states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index adb7a787e20d5..c61378bcafc63 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_group_external.js
*
@@ -8,10 +6,10 @@
* The data passed to the $group is greater than 100MB, which should force
* disk to be used.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// use enough docs to exceed 100MB, the in-memory limit for $sort and $group
$config.data.numDocs = 24 * 1000;
var MB = 1024 * 1024; // bytes
diff --git a/jstests/concurrency/fsm_workloads/agg_lookup.js b/jstests/concurrency/fsm_workloads/agg_lookup.js
index cfc2cfc8086fe..a688094fa1ac4 100644
--- a/jstests/concurrency/fsm_workloads/agg_lookup.js
+++ b/jstests/concurrency/fsm_workloads/agg_lookup.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* agg_lookup.js
*
* Runs a $lookup aggregation simultaneously with updates.
*/
-var $config = (function() {
+export const $config = (function() {
const data = {numDocs: 100};
const states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index 25aa5a62eee41..78b54f7a9340a 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -1,14 +1,12 @@
-'use strict';
-
/**
* agg_match.js
*
* Runs an aggregation with a $match that returns half the documents.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getOutCollName = function getOutCollName(collName) {
return collName + '_out_agg_match';
};
diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
index 06587f20be08f..93db7dfa87cdf 100644
--- a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
+++ b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_merge_when_matched_replace_with_new.js
*
@@ -12,10 +10,12 @@
* requires_non_retryable_writes,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Set the collection to run concurrent moveChunk operations as the output collection.
$config.data.collWithMigrations = "agg_merge_when_matched_replace_with_new";
$config.data.threadRunCount = 0;
diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
index 739a7dc50e01d..0f9cf18e2b1fa 100644
--- a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
+++ b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_merge_when_not_matched_insert.js
*
@@ -11,18 +9,20 @@
* assumes_balancer_off,
* requires_non_retryable_writes,
* incompatible_with_gcov,
- * # The config fuzzer causes certain commands to time out periodically.
- * does_not_support_config_fuzzer,
*]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Set the collection to run concurrent moveChunk operations as the output collection.
$config.data.collWithMigrations = "agg_merge_when_not_matched_insert";
$config.data.threadRunCount = 0;
+ let initialMaxCatchUpPercentageBeforeBlockingWrites = null;
+
$config.states.aggregate = function aggregate(db, collName, connCache) {
const res = db[collName].aggregate([
{
@@ -52,5 +52,47 @@ var $config = extendWorkload($config, function($config, $super) {
this.threadRunCount += 1;
};
+ // This test is sensitive to low values of the parameter
+ // maxCatchUpPercentageBeforeBlockingWrites, which can be set by the config fuzzer. We set a min
+ // bound for this parameter here.
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, [db, collName, cluster]);
+
+ cluster.executeOnMongodNodes((db) => {
+ const param = assert.commandWorked(
+ db.adminCommand({getParameter: 1, maxCatchUpPercentageBeforeBlockingWrites: 1}));
+ if (param.hasOwnProperty("maxCatchUpPercentageBeforeBlockingWrites")) {
+ const defaultValue = 10;
+ if (param.maxCatchUpPercentageBeforeBlockingWrites < defaultValue) {
+ jsTest.log(
+ "Parameter `maxCatchUpPercentageBeforeBlockingWrites` value too low: " +
+ param.maxCatchUpPercentageBeforeBlockingWrites +
+ ". Setting value to default: " + defaultValue + ".");
+ initialMaxCatchUpPercentageBeforeBlockingWrites =
+ param.maxCatchUpPercentageBeforeBlockingWrites;
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, maxCatchUpPercentageBeforeBlockingWrites: defaultValue}));
+ }
+ }
+ });
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ if (initialMaxCatchUpPercentageBeforeBlockingWrites) {
+ jsTest.log(
+ "Resetting parameter `maxCatchUpPercentageBeforeBlockingWrites` to original value: " +
+ initialMaxCatchUpPercentageBeforeBlockingWrites);
+ cluster.executeOnMongodNodes((db) => {
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ maxCatchUpPercentageBeforeBlockingWrites:
+ initialMaxCatchUpPercentageBeforeBlockingWrites
+ }));
+ });
+ }
+
+ $super.teardown.apply(this, [db, collName, cluster]);
+ };
+
return $config;
});
diff --git a/jstests/concurrency/fsm_workloads/agg_out.js b/jstests/concurrency/fsm_workloads/agg_out.js
index 86ebca8801a9e..cbec48e16929a 100644
--- a/jstests/concurrency/fsm_workloads/agg_out.js
+++ b/jstests/concurrency/fsm_workloads/agg_out.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_out.js
*
@@ -14,11 +12,11 @@
*
* @tags: [requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use a smaller document size, but more iterations. The smaller documents will ensure each
// operation is faster, giving us time to do more operations and thus increasing the likelihood
// that any two operations will be happening concurrently.
@@ -29,6 +27,7 @@ var $config = extendWorkload($config, function($config, $super) {
// because it is assumed to be unique.
$config.data.indexSpecs = [{rand: -1, randInt: 1}, {randInt: -1}, {flag: 1}, {padding: 'text'}];
+ $config.data.shardKey = {_id: 'hashed'};
// We'll use document validation so that we can change the collection options in the middle of
// an $out, to test that the $out stage will notice this and error. This validator is not very
@@ -144,7 +143,7 @@ var $config = extendWorkload($config, function($config, $super) {
if (isMongos(db) && this.tid === 0) {
assertWhenOwnDB.commandWorked(db.adminCommand({enableSharding: db.getName()}));
assertWhenOwnDB.commandWorked(db.adminCommand(
- {shardCollection: db[this.outputCollName].getFullName(), key: {_id: 'hashed'}}));
+ {shardCollection: db[this.outputCollName].getFullName(), key: this.shardKey}));
}
};
diff --git a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
index d6bdebb5d570a..44434a35bc80e 100644
--- a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
+++ b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
@@ -10,11 +10,10 @@
*
* @tags: [uses_curop_agg_stage]
*/
-'use strict';
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.aggregate = function aggregate(db, collName) {
// $out to the same collection so that concurrent aggregate commands would cause congestion.
db[collName].runCommand(
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 757ecf76097ff..9628c3bee9e1d 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* agg_sort.js
*
* Runs an aggregation with a $match that returns half the documents followed
* by a $sort on a field containing a random float.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
return collName + '_out_agg_sort_';
};
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index b8cbad826bbe7..94ed43cbb5f58 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_sort_external.js
*
@@ -8,10 +6,10 @@
*
* The data returned by the $match is greater than 100MB, which should force an external sort.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// use enough docs to exceed 100MB, the in-memory limit for $sort and $group
$config.data.numDocs = 24 * 1000;
var MB = 1024 * 1024; // bytes
diff --git a/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js
index c2992479aa751..82276ba7ef305 100644
--- a/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js
+++ b/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js
@@ -6,11 +6,12 @@
* uses_curop_agg_stage
* ]
*/
-'use strict';
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.commentStr = "agg_unionWith_interrupt_cleanup";
$config.states.aggregate = function aggregate(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js
index e5a2178d44645..a4c3225f96ff1 100644
--- a/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js
+++ b/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_union_with_chunk_migrations.js
*
@@ -17,10 +15,12 @@
* requires_non_retryable_writes,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.collWithMigrations = "union_ns";
$config.states.aggregate = function aggregate(db, collName, connCache) {
diff --git a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
index 1d7685ee75189..f16c3dd7c5e31 100644
--- a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
+++ b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* agg_with_chunk_migrations.js
*
@@ -16,10 +14,12 @@
* requires_non_retryable_writes,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// The base setup will insert 'partitionSize' number of documents per thread, evenly
// distributing across the chunks. Documents will only have the "_id" field.
$config.data.partitionSize = 50;
diff --git a/jstests/concurrency/fsm_workloads/analyze_shard_key.js b/jstests/concurrency/fsm_workloads/analyze_shard_key.js
index 6ee75f0124a90..35fbacb70a4ef 100644
--- a/jstests/concurrency/fsm_workloads/analyze_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/analyze_shard_key.js
@@ -1,54 +1,45 @@
-'use strict';
-
/**
* Tests that the analyzeShardKey command returns correct metrics.
*
* This workload implicitly assumes that its tid range is [0, $config.threadCount). This isn't
* guaranteed to be true when it is run in parallel with other workloads.
*
- * TODO (SERVER-75532): Investigate the high variability of the runtime of analyze_shard_key.js in
- * suites with chunk migration and/or stepdown/kill/terminate.
* @tags: [
- * requires_fcv_70,
- * featureFlagUpdateOneWithoutShardKey,
+ * requires_fcv_71,
* uses_transactions,
* resource_intensive,
* incompatible_with_concurrency_simultaneous,
- * does_not_support_stepdowns,
- * assumes_balancer_off
* ]
*/
-load("jstests/concurrency/fsm_libs/extend_workload.js");
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+
load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // for isMongos
load("jstests/libs/fail_point_util.js");
+load("jstests/libs/retryable_writes_util.js");
load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js");
const aggregateInterruptErrors =
[ErrorCodes.CursorNotFound, ErrorCodes.CursorKilled, ErrorCodes.QueryPlanKilled];
-if ($config === undefined) {
- // There is no workload to extend. Define a noop base workload to make the 'extendWorkload' call
- // below still work.
- $config = {
- threadCount: 1,
- iterations: 1,
- startState: "init",
- data: {},
- states: {init: function(db, collName) {}},
- transitions: {init: {init: 1}},
- setup: function(db, collName) {},
- teardown: function(db, collName) {},
- };
-}
-
-var $config = extendWorkload($config, function($config, $super) {
+const kBaseConfig = {
+ threadCount: 1,
+ iterations: 1,
+ startState: "init",
+ data: {},
+ states: {init: function(db, collName) {}},
+ transitions: {init: {init: 1}},
+ setup: function(db, collName) {},
+ teardown: function(db, collName) {},
+};
+
+export const $config = extendWorkload(kBaseConfig, function($config, $super) {
$config.threadCount = 10;
$config.iterations = 500;
// The sample rate range for query sampling.
- $config.data.minSampleRate = 1000;
- $config.data.maxSampleRate = 1500;
+ $config.data.minSamplesPerSecond = 1000;
+ $config.data.maxSamplesPerSecond = 1500;
 // The comment to attach to queries in the read and write states below to mark them as
// eligible for sampling. Queries such as the aggregate queries for looking up documents to
// update will not have this comment attached since they do not follow the query patterns
@@ -102,11 +93,6 @@ var $config = extendWorkload($config, function($config, $super) {
key: {[this.currentShardKeyFieldName]: 1, [this.candidateShardKeyFieldName]: 1},
unique: isUnique
}];
- // For a compound hashed shard key, the exact shard key index is needed for determining
- // the monotonicity.
- if (isHashed) {
- indexSpecs.push({name: "exact_index", key: shardKey});
- }
} else {
shardKey = {[this.candidateShardKeyFieldName]: isHashed ? "hashed" : 1};
indexSpecs = [{
@@ -196,6 +182,7 @@ var $config = extendWorkload($config, function($config, $super) {
/**
* Generates and inserts initial documents.
*/
+ $config.data.insertBatchSize = 1000;
$config.data.generateInitialDocuments = function generateInitialDocuments(
db, collName, cluster) {
this.numInitialDocuments = 0;
@@ -230,12 +217,17 @@ var $config = extendWorkload($config, function($config, $super) {
assert.commandWorked(
db.runCommand({createIndexes: collName, indexes: this.shardKeyOptions.indexSpecs}));
- assert.commandWorked(db.runCommand({insert: collName, documents: docs}));
-
- // Wait for the documents to get replicated to all nodes so that a analyzeShardKey command
- // runs immediately after this can assert on the metrics regardless of which nodes it
- // targets.
- cluster.awaitReplication();
+ // To reduce the insertion order noise caused by parallel oplog application on
+ // secondaries, insert the documents in multiple batches.
+ let currIndex = 0;
+ while (currIndex < docs.length) {
+ const endIndex = currIndex + this.insertBatchSize;
+ assert.commandWorked(db.runCommand(
+ {insert: collName, documents: docs.slice(currIndex, endIndex), ordered: true}));
+ currIndex = endIndex;
+ // Wait for secondaries to have replicated the writes.
+ cluster.awaitReplication();
+ }
 print(`Set up collection that has the following shard key to analyze ${tojson({
shardKeyOptions: this.shardKeyOptions,
@@ -463,9 +455,10 @@ var $config = extendWorkload($config, function($config, $super) {
* ranges.
*/
$config.data.assertKeyCharacteristicsMetrics = function assertKeyCharacteristicsMetrics(
- metrics) {
+ res, isSampling) {
// Perform basic validation of the metrics.
- AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(metrics);
+ AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res);
+ const metrics = res.keyCharacteristics;
assert.eq(metrics.isUnique, this.shardKeyOptions.isUnique, metrics);
// Validate the cardinality metrics. Due to the concurrent writes by other threads, it is
@@ -479,7 +472,7 @@ var $config = extendWorkload($config, function($config, $super) {
// not unique, they are calculated using an aggregation with readConcern "available" (i.e.
// it opts out of shard versioning and filtering). If the shard key is unique, they are
// inferred from fast count of the documents.
- if (metrics.numDistinctValues < this.numInitialDistinctValues) {
+ if (!isSampling && (metrics.numDistinctValues < this.numInitialDistinctValues)) {
if (!TestData.runningWithBalancer) {
assert(this.shardKeyOptions.isUnique, metrics);
if (!TestData.runningWithShardStepdowns) {
@@ -500,7 +493,7 @@ var $config = extendWorkload($config, function($config, $super) {
// since chunk migration deletes documents from the donor shard and re-inserts them on the
// recipient shard so there is no guarantee that the insertion order from the client is
// preserved.
- if (!TestData.runningWithBalancer) {
+ if (!isSampling && !TestData.runningWithBalancer) {
assert.eq(metrics.monotonicity.type,
this.shardKeyOptions.isMonotonic && !this.shardKeyOptions.isHashed
? "monotonic"
@@ -513,23 +506,89 @@ var $config = extendWorkload($config, function($config, $super) {
// distribution.
$config.data.intermediateReadDistributionMetricsMaxDiff = 20;
$config.data.intermediateWriteDistributionMetricsMaxDiff = 20;
- // The final diff windows are larger when the reads and writes are run inside transactions or
- // with stepdown/kill/terminate in the background due to the presence of retries from the
- // external client.
- $config.data.finalReadDistributionMetricsMaxDiff =
- (TestData.runInsideTransaction || TestData.runningWithShardStepdowns) ? 15 : 12;
- $config.data.finalWriteDistributionMetricsMaxDiff =
- (TestData.runInsideTransaction || TestData.runningWithShardStepdowns) ? 15 : 12;
+ $config.data.finalReadDistributionMetricsMaxDiff = 15;
+ $config.data.finalWriteDistributionMetricsMaxDiff = 15;
// The minimum number of sampled queries to wait for before verifying the read and write
// distribution metrics.
$config.data.numSampledQueriesThreshold = 1500;
+ // The maximum percentage-point difference allowed between the expected and actual sample size
+ // for each command when deciding whether the sample population matches the mock query pattern.
+ $config.data.sampleSizePercentageMaxDiff = 5;
+
+ // The number of sampled queries returned by the latest analyzeShardKey command.
+ $config.data.previousNumSampledQueries = 0;
+
+ $config.data.isAcceptableSampleSize = function isAcceptableSampleSize(
+ part, whole, expectedPercentage) {
+ return Math.abs(AnalyzeShardKeyUtil.calculatePercentage(part, whole) - expectedPercentage) <
+ this.sampleSizePercentageMaxDiff;
+ };
+
+ $config.data.shouldValidateReadDistribution = function shouldValidateReadDistribution(
+ sampleSize) {
+ if (sampleSize.total < this.numSampledQueriesThreshold) {
+ return false;
+ }
+
+ // There are 4 read states (i.e. find, aggregate, count and distinct) and they have the
+ // same incoming and outgoing state transition probabilities.
+ const isAcceptable = this.isAcceptableSampleSize(
+ sampleSize.find, sampleSize.total, 25 /* expectedPercentage */) &&
+ this.isAcceptableSampleSize(
+ sampleSize.aggregate, sampleSize.total, 25 /* expectedPercentage */) &&
+ this.isAcceptableSampleSize(
+ sampleSize.count, sampleSize.total, 25 /* expectedPercentage */) &&
+ this.isAcceptableSampleSize(
+ sampleSize.distinct, sampleSize.total, 25 /* expectedPercentage */);
+
+ if (!isAcceptable) {
+ print(
+ `Skip validating the read distribution metrics because the sample ` +
+ `population does not match the mock query patterns: ${tojsononeline(sampleSize)}`);
+ // The sample population should always match the mock query patterns unless there are
+ // retries.
+ assert(TestData.runningWithShardStepdowns || TestData.runningWithBalancer ||
+ TestData.runInsideTransaction);
+ }
+ return isAcceptable;
+ };
+
+ $config.data.shouldValidateWriteDistribution = function shouldValidateWriteDistribution(
+ sampleSize) {
+ if (sampleSize.total < this.numSampledQueriesThreshold) {
+ return false;
+ }
+
+ // There are 4 write states (i.e. update, remove, findAndModifyUpdate and
+ // findAndModifyRemove) and they have the same incoming and outgoing state transition
+ // probabilities.
+ const isAcceptable =
+ this.isAcceptableSampleSize(
+ sampleSize.update, sampleSize.total, 25 /* expectedPercentage */) &&
+ this.isAcceptableSampleSize(
+ sampleSize.delete, sampleSize.total, 25 /* expectedPercentage */) &&
+ this.isAcceptableSampleSize(
+ sampleSize.findAndModify, sampleSize.total, 50 /* expectedPercentage */);
+
+ if (!isAcceptable) {
+ print(
+ `Skip validating the write distribution metrics because the sample ` +
+ `population does not match the mock query patterns: ${tojsononeline(sampleSize)}`);
+ // The sample population should always match the mock query patterns unless there are
+ // retries.
+ assert(TestData.runningWithShardStepdowns || TestData.runningWithBalancer ||
+ TestData.runInsideTransaction);
+ }
+ return isAcceptable;
+ };
+
/**
* Verifies that the metrics about the read and write distribution are within acceptable ranges.
*/
$config.data.assertReadWriteDistributionMetrics = function assertReadWriteDistributionMetrics(
- metrics, isFinal) {
- AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(metrics);
+ res, isFinal) {
+ AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(res);
let assertReadMetricsDiff = (actual, expected) => {
const maxDiff = isFinal ? this.finalReadDistributionMetricsMaxDiff
@@ -542,32 +601,34 @@ var $config = extendWorkload($config, function($config, $super) {
assert.lt(Math.abs(actual - expected), maxDiff, {actual, expected});
};
- if (metrics.readDistribution.sampleSize.total > this.numSampledQueriesThreshold) {
- assertReadMetricsDiff(metrics.readDistribution.percentageOfSingleShardReads,
+ const currentNumSampledQueries =
+ res.readDistribution.sampleSize.total + res.writeDistribution.sampleSize.total;
+ this.previousNumSampledQueries = currentNumSampledQueries;
+
+ if (this.shouldValidateReadDistribution(res.readDistribution.sampleSize)) {
+ assertReadMetricsDiff(res.readDistribution.percentageOfSingleShardReads,
this.readDistribution.percentageOfSingleShardReads);
- assertReadMetricsDiff(metrics.readDistribution.percentageOfMultiShardReads,
+ assertReadMetricsDiff(res.readDistribution.percentageOfMultiShardReads,
this.readDistribution.percentageOfMultiShardReads);
- assertReadMetricsDiff(metrics.readDistribution.percentageOfScatterGatherReads,
+ assertReadMetricsDiff(res.readDistribution.percentageOfScatterGatherReads,
this.readDistribution.percentageOfScatterGatherReads);
- assert.eq(metrics.readDistribution.numReadsByRange.length,
- this.analyzeShardKeyNumRanges);
+ assert.eq(res.readDistribution.numReadsByRange.length, this.analyzeShardKeyNumRanges);
}
- if (metrics.writeDistribution.sampleSize.total > this.numSampledQueriesThreshold) {
- assertWriteMetricsDiff(metrics.writeDistribution.percentageOfSingleShardWrites,
+
+ if (this.shouldValidateWriteDistribution(res.writeDistribution.sampleSize)) {
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfSingleShardWrites,
this.writeDistribution.percentageOfSingleShardWrites);
- assertWriteMetricsDiff(metrics.writeDistribution.percentageOfMultiShardWrites,
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfMultiShardWrites,
this.writeDistribution.percentageOfMultiShardWrites);
- assertWriteMetricsDiff(metrics.writeDistribution.percentageOfScatterGatherWrites,
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfScatterGatherWrites,
this.writeDistribution.percentageOfScatterGatherWrites);
- assertWriteMetricsDiff(metrics.writeDistribution.percentageOfShardKeyUpdates,
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfShardKeyUpdates,
this.writeDistribution.percentageOfShardKeyUpdates);
- assertWriteMetricsDiff(
- metrics.writeDistribution.percentageOfSingleWritesWithoutShardKey,
- this.writeDistribution.percentageOfSingleWritesWithoutShardKey);
- assertWriteMetricsDiff(metrics.writeDistribution.percentageOfMultiWritesWithoutShardKey,
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfSingleWritesWithoutShardKey,
+ this.writeDistribution.percentageOfSingleWritesWithoutShardKey);
+ assertWriteMetricsDiff(res.writeDistribution.percentageOfMultiWritesWithoutShardKey,
this.writeDistribution.percentageOfMultiWritesWithoutShardKey);
- assert.eq(metrics.writeDistribution.numWritesByRange.length,
- this.analyzeShardKeyNumRanges);
+ assert.eq(res.writeDistribution.numWritesByRange.length, this.analyzeShardKeyNumRanges);
}
};
@@ -589,8 +650,11 @@ var $config = extendWorkload($config, function($config, $super) {
// non-duplicate document using a random cursor. 4952606 is the error that the sampling
// based split policy throws if it fails to find the specified number of split points.
print(
- `Failed to analyze the shard key due to duplicate keys returned by random cursor ${
- tojsononeline(err)}`);
+ `Failed to analyze the shard key due to duplicate keys returned by random ` +
+ `cursor. Skipping the next ${this.numAnalyzeShardKeySkipsAfterRandomCursorError} ` +
+ `analyzeShardKey states since the analyzeShardKey command is likely to fail with ` +
+ `this error again. ${tojsononeline(err)}`);
+ this.numAnalyzeShardKeySkips = this.numAnalyzeShardKeySkipsAfterRandomCursorError;
return true;
}
if (this.expectedAggregateInterruptErrors.includes(err.code)) {
@@ -599,6 +663,31 @@ var $config = extendWorkload($config, function($config, $super) {
tojsononeline(err)}`);
return true;
}
+ if (err.code == 7559401) {
+ print(`Failed to analyze the shard key because one of the shards fetched the split ` +
+ `point documents after the TTL deletions had started. ${tojsononeline(err)}`);
+ return true;
+ }
+ if (err.code == 7588600) {
+ print(`Failed to analyze the shard key because the document for one of the most ` +
+ `common shard key values got deleted while the command was running. ${
+ tojsononeline(err)}`);
+ return true;
+ }
+ if (err.code == 7826501) {
+ print(`Failed to analyze the shard key because $collStats indicates that the ` +
+ `collection is empty. ${tojsononeline(err)}`);
+ // Inaccurate fast count is only expected when there is unclean shutdown.
+ return TestData.runningWithShardStepdowns;
+ }
+ if (err.code == ErrorCodes.IllegalOperation && err.errmsg &&
+ err.errmsg.includes("monotonicity") && err.errmsg.includes("empty collection")) {
+ print(`Failed to analyze the shard key because the fast count during the ` +
+ `step for calculating the monotonicity metrics indicates that the collection ` +
+ `is empty. ${tojsononeline(err)}`);
+ // Inaccurate fast count is only expected when there is unclean shutdown.
+ return TestData.runningWithShardStepdowns;
+ }
return false;
};
@@ -634,6 +723,139 @@ var $config = extendWorkload($config, function($config, $super) {
return truncatedRes;
};
+ /**
+ * Runs $listSampledQueries and asserts that the number of sampled queries is greater than or equal
+ * to the number of sampled queries returned by the latest analyzeShardKey command.
+ */
+ $config.data.listSampledQueries = function listSampledQueries(db, collName) {
+ const ns = db.getName() + "." + collName;
+ let docs;
+ try {
+ docs = db.getSiblingDB("admin")
+ .aggregate(
+ [{$listSampledQueries: {namespace: ns}}],
+ // The network override does not support issuing getMore commands since
+ // if a network error occurs during it then it won't know whether the
+ // cursor was advanced or not. To allow this workload to run in a suite
+ // with network error, use a large batch size so that no getMore commands
+ // would be issued.
+ {cursor: TestData.runningWithShardStepdowns ? {batchSize: 100000} : {}})
+ .toArray();
+ } catch (e) {
+ if (this.expectedAggregateInterruptErrors.includes(e.code)) {
+ return;
+ }
+ throw e;
+ }
+ assert.gte(docs.length, this.previousNumSampledQueries);
+ };
+
+ // To avoid leaving a lot of config.analyzeShardKeySplitPoints documents around, which could
+ // make restart recovery take a long time, overwrite the values of the
+ // 'analyzeShardKeySplitPointExpirationSecs' and 'ttlMonitorSleepSecs' server parameters to make
+ // the cleanup occur as the workload runs, and then restore the original values during
+ // teardown().
+ $config.data.splitPointExpirationSecs = 10;
+ $config.data.ttlMonitorSleepSecs = 5;
+ $config.data.originalSplitPointExpirationSecs = {};
+ $config.data.originalTTLMonitorSleepSecs = {};
+
+ $config.data.overrideSplitPointExpiration = function overrideSplitPointExpiration(cluster) {
+ cluster.executeOnMongodNodes((db) => {
+ const res = assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ analyzeShardKeySplitPointExpirationSecs: this.splitPointExpirationSecs,
+ }));
+ this.originalSplitPointExpirationSecs[db.getMongo().host] = res.was;
+ });
+ };
+
+ $config.data.overrideTTLMonitorSleepSecs = function overrideTTLMonitorSleepSecs(cluster) {
+ cluster.executeOnMongodNodes((db) => {
+ const res = assert.commandWorked(
+ db.adminCommand({setParameter: 1, ttlMonitorSleepSecs: this.ttlMonitorSleepSecs}));
+ this.originalTTLMonitorSleepSecs[db.getMongo().host] = res.was;
+ });
+ };
+
+ $config.data.restoreSplitPointExpiration = function restoreSplitPointExpiration(cluster) {
+ cluster.executeOnMongodNodes((db) => {
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ analyzeShardKeySplitPointExpirationSecs:
+ this.originalSplitPointExpirationSecs[db.getMongo().host],
+ }));
+ });
+ };
+
+ $config.data.restoreTTLMonitorSleepSecs = function restoreTTLMonitorSleepSecs(cluster) {
+ cluster.executeOnMongodNodes((db) => {
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ ttlMonitorSleepSecs: this.originalTTLMonitorSleepSecs[db.getMongo().host],
+ }));
+ });
+ };
+
+ /**
+ * Returns the number of documents that match the given filter in the given collection.
+ */
+ $config.data.getNumDocuments = function getNumDocuments(db, collName, filter) {
+ const firstBatch = assert
+ .commandWorked(db.runCommand({
+ aggregate: collName,
+ pipeline: [{$match: filter}, {$count: "count"}],
+ cursor: {}
+ }))
+ .cursor.firstBatch;
+ return firstBatch.length == 0 ? 0 : firstBatch[0].count;
+ };
+
+ // To avoid leaving unnecessary documents in config database after this workload finishes,
+ // remove all the sampled query documents and split point documents during teardown().
+ $config.data.removeSampledQueryAndSplitPointDocuments =
+ function removeSampledQueryAndSplitPointDocuments(db, collName, cluster) {
+ const ns = db.getName() + "." + collName;
+ cluster.getReplicaSets().forEach(rst => {
+ while (true) {
+ try {
+ const configDb = rst.getPrimary().getDB("config");
+ jsTest.log("Removing sampled query documents and split points documents");
+ jsTest.log(
+ "The counts before removing " + tojsononeline({
+ sampledQueries: this.getNumDocuments(configDb, "sampledQueries", {ns}),
+ sampledQueriesDiff:
+ this.getNumDocuments(configDb, "sampledQueriesDiff", {ns}),
+ analyzeShardKeySplitPoints:
+ this.getNumDocuments(configDb, "analyzeShardKeySplitPoints", {ns}),
+
+ }));
+
+ assert.commandWorked(configDb.sampledQueries.remove({}));
+ assert.commandWorked(configDb.sampledQueriesDiff.remove({}));
+ assert.commandWorked(configDb.analyzeShardKeySplitPoints.remove({}));
+
+ jsTest.log(
+ "The counts after removing " + tojsononeline({
+ sampledQueries: this.getNumDocuments(configDb, "sampledQueries", {ns}),
+ sampledQueriesDiff:
+ this.getNumDocuments(configDb, "sampledQueriesDiff", {ns}),
+ analyzeShardKeySplitPoints:
+ this.getNumDocuments(configDb, "analyzeShardKeySplitPoints", {ns}),
+
+ }));
+ return;
+ } catch (e) {
+ if (RetryableWritesUtil.isRetryableCode(e.code)) {
+ print("Retry documents removal after error: " + tojson(e));
+ continue;
+ }
+ throw e;
+ }
+ }
+ });
+ };
+
////
// The body of the workload.
@@ -674,6 +896,9 @@ var $config = extendWorkload($config, function($config, $super) {
{comment: this.eligibleForSamplingComment});
});
+ this.overrideSplitPointExpiration(cluster);
+ this.overrideTTLMonitorSleepSecs(cluster);
+
// On a sharded cluster, running an aggregate command by default involves running getMore
// commands since the cursor establisher in sharding is pessimistic about the router being
// stale so it always makes a cursor with {batchSize: 0} on the shards and then run getMore
@@ -712,6 +937,16 @@ var $config = extendWorkload($config, function($config, $super) {
print("Doing final validation of read and write distribution metrics " +
tojson(this.truncateAnalyzeShardKeyResponseForLogging(metrics)));
this.assertReadWriteDistributionMetrics(metrics, true /* isFinal */);
+
+ print("Listing sampled queries " +
+ tojsononeline({lastNumSampledQueries: this.previousNumSampledQueries}));
+ assert.gt(this.previousNumSampledQueries, 0);
+ this.listSampledQueries(db, collName);
+
+ print("Cleaning up");
+ this.restoreSplitPointExpiration(cluster);
+ this.restoreTTLMonitorSleepSecs(cluster);
+ this.removeSampledQueryAndSplitPointDocuments(db, collName, cluster);
};
$config.states.init = function init(db, collName) {
@@ -719,15 +954,37 @@ var $config = extendWorkload($config, function($config, $super) {
this.metricsDocId = new UUID(this.metricsDocIdString);
};
+ $config.data.numAnalyzeShardKeySkipsAfterRandomCursorError = 5;
+ // Set to a positive value when the analyzeShardKey command fails with an error that is likely
+ // to occur again upon the next try.
+ $config.data.numAnalyzeShardKeySkips = 0;
+
$config.states.analyzeShardKey = function analyzeShardKey(db, collName) {
- print("Starting analyzeShardKey state");
+ if (this.numAnalyzeShardKeySkips > 0) {
+ print("Skipping the analyzeShardKey state");
+ this.numAnalyzeShardKeySkips--;
+ return;
+ }
+
const ns = db.getName() + "." + collName;
- const res = db.adminCommand({analyzeShardKey: ns, key: this.shardKeyOptions.shardKey});
+ const cmdObj = {analyzeShardKey: ns, key: this.shardKeyOptions.shardKey};
+ const rand = Math.random();
+ if (rand < 0.25) {
+ cmdObj.sampleRate = Math.random() * 0.5 + 0.5;
+ } else if (rand < 0.5) {
+ cmdObj.sampleSize =
+ NumberLong(Math.floor(Math.random() * 1.5 * this.numInitialDocuments));
+ }
+ const isSampling =
+ cmdObj.hasOwnProperty("sampleRate") || cmdObj.hasOwnProperty("sampleSize");
+
+ print("Starting analyzeShardKey state " + tojsononeline(cmdObj));
+ const res = db.adminCommand(cmdObj);
try {
assert.commandWorked(res);
print("Metrics: " +
tojsononeline({res: this.truncateAnalyzeShardKeyResponseForLogging(res)}));
- this.assertKeyCharacteristicsMetrics(res);
+ this.assertKeyCharacteristicsMetrics(res, isSampling);
this.assertReadWriteDistributionMetrics(res, false /* isFinal */);
// Persist the metrics so we can do the final validation during teardown.
assert.commandWorked(
@@ -748,7 +1005,8 @@ var $config = extendWorkload($config, function($config, $super) {
assert.commandWorked(db.adminCommand({
configureQueryAnalyzer: ns,
mode: "full",
- sampleRate: AnalyzeShardKeyUtil.getRandInteger(this.minSampleRate, this.maxSampleRate)
+ samplesPerSecond: AnalyzeShardKeyUtil.getRandInteger(this.minSamplesPerSecond,
+ this.maxSamplesPerSecond)
}));
print("Finished enableQuerySampling state");
};
@@ -756,14 +1014,16 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.disableQuerySampling = function disableQuerySampling(db, collName) {
print("Starting disableQuerySampling state");
const ns = db.getName() + "." + collName;
- // If query sampling is off, this command is expected to fail with an IllegalOperation
- // error.
- assert.commandWorkedOrFailedWithCode(
- db.adminCommand({configureQueryAnalyzer: ns, mode: "off"}),
- ErrorCodes.IllegalOperation);
+ assert.commandWorked(db.adminCommand({configureQueryAnalyzer: ns, mode: "off"}));
print("Finished disableQuerySampling state");
};
+ $config.states.listSampledQueries = function listSampledQueries(db, collName) {
+ print("Starting listSampledQueries state");
+ this.listSampledQueries(db, collName);
+ print("Finished listSampledQueries state");
+ };
+
$config.states.find = function find(db, collName) {
const cmdObj = {
find: collName,
@@ -841,7 +1101,7 @@ var $config = extendWorkload($config, function($config, $super) {
} catch (e) {
if (!this.isAcceptableUpdateError(res) &&
!(res.hasOwnProperty("writeErrors") &&
- isAcceptableUpdateError(res.writeErrors[0]))) {
+ this.isAcceptableUpdateError(res.writeErrors[0]))) {
throw e;
}
}
@@ -863,6 +1123,7 @@ var $config = extendWorkload($config, function($config, $super) {
print("Starting remove state " + tojsononeline(cmdObj));
const res = assert.commandWorked(db.runCommand(cmdObj));
assert.eq(res.n, 1, {cmdObj, res});
+
// Insert a random document to restore the original number of documents.
assert.commandWorked(
db.runCommand({insert: collName, documents: [this.generateRandomDocument(this.tid)]}));
@@ -911,6 +1172,7 @@ var $config = extendWorkload($config, function($config, $super) {
print("Starting findAndModifyRemove state " + tojsononeline(cmdObj));
const res = assert.commandWorked(db.runCommand(cmdObj));
assert.eq(res.lastErrorObject.n, 1, {cmdObj, res});
+
// Insert a random document to restore the original number of documents.
assert.commandWorked(
db.runCommand({insert: collName, documents: [this.generateRandomDocument(this.tid)]}));
@@ -936,6 +1198,11 @@ var $config = extendWorkload($config, function($config, $super) {
originalDisableQuerySampling.call(this, db, collName);
};
+ const originalListSampledQueries = $config.states.listSampledQueries;
+ $config.states.listSampledQueries = function(db, collName, connCache) {
+ originalListSampledQueries.call(this, db, collName);
+ };
+
const originalAnalyzeShardKey = $config.states.analyzeShardKey;
$config.states.analyzeShardKey = function(db, collName, connCache) {
originalAnalyzeShardKey.call(this, db, collName);
@@ -992,8 +1259,9 @@ var $config = extendWorkload($config, function($config, $super) {
enableQuerySampling: 1,
},
analyzeShardKey: {
- enableQuerySampling: 0.18,
+ enableQuerySampling: 0.15,
disableQuerySampling: 0.02,
+ listSampledQueries: 0.03,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1004,8 +1272,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
enableQuerySampling: {
- analyzeShardKey: 0.18,
+ analyzeShardKey: 0.15,
disableQuerySampling: 0.02,
+ listSampledQueries: 0.03,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1019,7 +1288,7 @@ var $config = extendWorkload($config, function($config, $super) {
analyzeShardKey: 0.05,
enableQuerySampling: 0.95,
},
- find: {
+ listSampledQueries: {
analyzeShardKey: 0.2,
enableQuerySampling: 0.1,
aggregate: 0.1,
@@ -1030,9 +1299,22 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyUpdate: 0.1,
findAndModifyRemove: 0.1,
},
+ find: {
+ analyzeShardKey: 0.15,
+ enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
+ aggregate: 0.1,
+ count: 0.1,
+ distinct: 0.1,
+ update: 0.1,
+ remove: 0.1,
+ findAndModifyUpdate: 0.1,
+ findAndModifyRemove: 0.1,
+ },
aggregate: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
count: 0.1,
distinct: 0.1,
@@ -1042,8 +1324,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
count: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
distinct: 0.1,
@@ -1053,8 +1336,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
distinct: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1064,8 +1348,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
update: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1075,8 +1360,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
remove: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1086,8 +1372,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
findAndModifyUpdate: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
count: 0.1,
@@ -1097,8 +1384,9 @@ var $config = extendWorkload($config, function($config, $super) {
findAndModifyRemove: 0.1,
},
findAndModifyRemove: {
- analyzeShardKey: 0.2,
+ analyzeShardKey: 0.15,
enableQuerySampling: 0.1,
+ listSampledQueries: 0.05,
find: 0.1,
aggregate: 0.1,
count: 0.1,
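The reweighted transition table above keeps each row summing to 1: where a row gains listSampledQueries: 0.05, analyzeShardKey is lowered from 0.2 to 0.15 to compensate. As a rough sketch of how such a row can drive a weighted pick of the next state, assuming the runner treats the values as probabilities (the actual selection logic lives in jstests/concurrency/fsm_libs and may differ):

// Illustrative sketch only: choose the next state from a row such as
// {analyzeShardKey: 0.15, enableQuerySampling: 0.1, listSampledQueries: 0.05, ...}.
function chooseNextStateSketch(transitionRow) {
    const pick = Math.random();  // uniform in [0, 1)
    let cumulative = 0;
    for (const [stateName, probability] of Object.entries(transitionRow)) {
        cumulative += probability;
        if (pick < cumulative) {
            return stateName;
        }
    }
    // Guard against floating-point round-off when the weights sum to just under 1.
    return Object.keys(transitionRow)[0];
}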
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 05ec4a9fb32eb..9dec84d40fdfa 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_create_role.js
*
@@ -10,7 +8,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole
// UMC commands are not supported in transactions.
TestData.runInsideTransaction = false;
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the role name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index 26c327dd0cde0..fcb4930fd14b1 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_create_user.js
*
@@ -10,7 +8,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUser
// UMC commands are not supported in transactions.
TestData.runInsideTransaction = false;
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the username,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
index 6a5ebc2240dd2..43bf23bef6d7f 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_drop_role.js
*
@@ -12,7 +10,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole
// UMC commands are not supported in transactions.
TestData.runInsideTransaction = false;
-var $config = (function() {
+export const $config = (function() {
const kMaxCmdTimeMs = 60000;
const kMaxTxnLockReqTimeMs = 100;
const kDefaultTxnLockReqTimeMs = 5;
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
index 248e0139287ac..75fe9e61fdbd8 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -1,5 +1,3 @@
-'use strict';
-
// UMC commands are not supported in transactions.
TestData.runInsideTransaction = false;
@@ -10,7 +8,7 @@ TestData.runInsideTransaction = false;
* drops the user from the database.
* @tags: [incompatible_with_concurrency_simultaneous]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the username,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js b/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js
index 3d50438cc1313..5a91ad52def05 100644
--- a/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js
+++ b/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_privilege_cache_miss.js
*
@@ -9,10 +7,12 @@
*/
// Use the auth_privilege_consistency workload as a base.
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/auth_privilege_consistency.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/auth_privilege_consistency.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Override setup() to also set cache-miss and slow load failpoints.
const kResolveRolesDelayMS = 100;
diff --git a/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js b/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js
index 4997d47ad2eeb..474ed1fcbb52f 100644
--- a/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js
+++ b/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_privilege_consistency.js
*
@@ -12,7 +10,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole
// UMC commands are not supported in transactions.
TestData.runInsideTransaction = false;
-var $config = (function() {
+export const $config = (function() {
const kTestUserPassword = 'secret';
const kMaxCmdTimeMs = 60000;
const kMaxTxnLockReqTimeMs = 100;
diff --git a/jstests/concurrency/fsm_workloads/auth_role_consistency.js b/jstests/concurrency/fsm_workloads/auth_role_consistency.js
index 4ce7d3300f697..506ba1b0d98d9 100644
--- a/jstests/concurrency/fsm_workloads/auth_role_consistency.js
+++ b/jstests/concurrency/fsm_workloads/auth_role_consistency.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* auth_role_consistency.js
*
@@ -8,7 +6,7 @@
*/
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
-var $config = (function() {
+export const $config = (function() {
const kRoleNamePrefix = 'auth_role_consistency';
const states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js b/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js
index ec01224dda36f..5965adcca8bd2 100644
--- a/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js
+++ b/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js
@@ -1,6 +1,3 @@
-'use strict';
-load("jstests/libs/analyze_plan.js");
-
/**
* batched_multi_deletes_with_write_conflicts.js
*
@@ -13,8 +10,9 @@ load("jstests/libs/analyze_plan.js");
* requires_fcv_61,
* ]
*/
+import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js";
-var $config = (function() {
+export const $config = (function() {
// 'data' is passed (copied) to each of the worker threads.
var data = {
// Defines the number of subsets of data, which are randomly picked to create conflicts.
diff --git a/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js b/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js
index c6c287082f822..7c07d20d31f94 100644
--- a/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js
+++ b/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js
@@ -1,16 +1,16 @@
-'use strict';
-
/**
* Performs range deletions while chunks are being moved.
*
* @tags: [requires_sharding, assumes_balancer_on, antithesis_incompatible]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js";
load('jstests/concurrency/fsm_workload_helpers/balancer.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/collection_defragmentation.js b/jstests/concurrency/fsm_workloads/collection_defragmentation.js
index f91b62a67412c..ec56ee992a701 100644
--- a/jstests/concurrency/fsm_workloads/collection_defragmentation.js
+++ b/jstests/concurrency/fsm_workloads/collection_defragmentation.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* collection_defragmentation.js
*
@@ -51,7 +49,7 @@ function getAllChunks(configDB, ns, keyPattern) {
return chunkArray;
}
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName, connCache) {
// Initialize defragmentation
@@ -223,14 +221,14 @@ var $config = (function() {
const fullNs = dbName + "." + collPrefix + j;
const numChunks = Random.randInt(30);
const numZones = Random.randInt(numChunks / 2);
- const docSizeBytes = Random.randInt(1024 * 1024) + 50;
+ const docSizeBytesRange = [50, 1024 * 1024];
defragmentationUtil.createFragmentedCollection(
mongos,
fullNs,
numChunks,
5 /* maxChunkFillMB */,
numZones,
- docSizeBytes,
+ docSizeBytesRange,
1000 /* chunkSpacing */,
true /* disableCollectionBalancing*/);
}
diff --git a/jstests/concurrency/fsm_workloads/collection_uuid.js b/jstests/concurrency/fsm_workloads/collection_uuid.js
index 0151d65d1d450..9eb1fed6ea055 100644
--- a/jstests/concurrency/fsm_workloads/collection_uuid.js
+++ b/jstests/concurrency/fsm_workloads/collection_uuid.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests running operations with 'collectionUUID' parameter while the collection is being renamed
* concurrently, and makes sure that all operations will succeed eventually when using the correct
@@ -133,7 +131,7 @@ const verifyFailingWithCollectionUUIDMismatch = function(
assert.eq(res.actualCollection, actualCollection);
};
-const testCommand = function(
+export const testCommand = function(
db, namespace, cmdName, cmdObj, data, expectedNonRetryableErrors = []) {
verifyFailingWithCollectionUUIDMismatch(
db, cmdName, cmdObj, data.sameDbCollUUID, sameDbCollName, namespace, data);
@@ -152,7 +150,7 @@ const testCommand = function(
runCommandInLoop(db, namespace, cmdName, cmdObj, data, expectedNonRetryableErrors);
};
-var $config = (function() {
+export const $config = (function() {
const data = {};
const states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js b/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js
index 7fbaba0357f65..3dd02cbb3fdf5 100644
--- a/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js
+++ b/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests running sharding operations with 'collectionUUID' parameter while the sharded collection is
 * being renamed concurrently.
@@ -11,10 +9,13 @@
* requires_sharding,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/collection_uuid.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig,
+ testCommand
+} from "jstests/concurrency/fsm_workloads/collection_uuid.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
const origStates = Object.keys($config.states);
$config.states = Object.extend({
shardingCommands: function shardingCommands(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index 4fb7945ae5231..c512ff7a84074 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* collmod.js
*
@@ -10,7 +8,7 @@
*
* All threads update the same TTL index on the same collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
numDocs: 1000,
maxTTL: 5000 // max time to live
diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
index 615af927c1ba9..de459d97bba3b 100644
--- a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
+++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* collmod_separate_collections.js
*
@@ -9,10 +7,10 @@
*
* Each thread updates a TTL index on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/collmod.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'collmod_separate_collections';
$config.data.shardKey = {createdAt: 1};
diff --git a/jstests/concurrency/fsm_workloads/collmod_writeconflict.js b/jstests/concurrency/fsm_workloads/collmod_writeconflict.js
index 32241ef4ab015..6d27d4c3abb97 100644
--- a/jstests/concurrency/fsm_workloads/collmod_writeconflict.js
+++ b/jstests/concurrency/fsm_workloads/collmod_writeconflict.js
@@ -1,14 +1,12 @@
-'use strict';
-
/**
* collmod_writeconflict.js
*
* Ensures collMod successfully handles WriteConflictExceptions.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/collmod.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'collmod_writeconflict';
$config.setup = function setup(db, collName, cluster) {
$super.setup.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
index e373cfe6063e0..784a9d24189fe 100644
--- a/jstests/concurrency/fsm_workloads/compact.js
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* compact.js
*
@@ -11,13 +9,13 @@
* with wiredTiger LSM variants. Bypass this command for the wiredTiger LSM variant
* until a fix is available for WT-2523.
*
- * @tags: [does_not_support_wiredtiger_lsm, requires_compact]
+ * @tags: [does_not_support_wiredtiger_lsm, incompatible_with_macos, requires_compact]
*/
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+export const $config = (function() {
var data = {
nDocumentsToInsert: 1000,
nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
diff --git a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
index 0a74749a138de..86a64786a1859 100644
--- a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
+++ b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* WiredTiger allows online compaction of its collections so it does not require an exclusive lock.
* This workload is meant to test the behavior of the locking changes done in SERVER-16413. To
@@ -11,7 +9,7 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
function init(db, collName) {
insertDocuments.call(this, db, collName);
@@ -38,7 +36,7 @@ var $config = (function() {
}
function createIndex(db, collName) {
- db[collName].createIndex({x: 1}, {background: true});
+ db[collName].createIndex({x: 1});
}
function dropIndex(db, collName) {
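
The {background: true} argument dropped here, and from the raw createIndexes command later in create_index_background.js and create_index_background_unique.js, is effectively a no-op on servers that use hybrid index builds, so the shorter calls request the same behaviour. A sketch of the two equivalent spellings; example_coll stands in for the workload's collName:

const collName = "example_coll";
db[collName].createIndex({x: 1});
// ...or the raw command form:
assert.commandWorked(db.runCommand({
    createIndexes: collName,
    indexes: [{key: {x: 1}, name: "x_1"}],
}));
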
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index e8c84f35fac67..48cc7e0afef39 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -12,9 +12,7 @@
* @tags: [requires_collstats, requires_capped]
*/
-var $config = (function() {
- load("jstests/libs/feature_flag_util.js");
-
+export const $config = (function() {
// TODO: This workload may fail if an iteration multiplier is specified.
var data = {prefix: 'convert_to_capped_collection'};
@@ -23,10 +21,6 @@ var $config = (function() {
return prefix + '_' + tid;
}
- function isMultiple256(num) {
- return num % 256 === 0;
- }
-
function init(db, collName) {
this.threadCollName = uniqueCollectionName(this.prefix, this.tid);
@@ -42,9 +36,6 @@ var $config = (function() {
assertWhenOwnDB(!db[this.threadCollName].isCapped());
assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size));
assertWhenOwnDB(db[this.threadCollName].isCapped());
- if (!FeatureFlagUtil.isPresentAndEnabled(db, "CappedCollectionsRelaxedSize")) {
- assertWhenOwnDB(isMultiple256(db[this.threadCollName].stats().maxSize));
- }
}
function convertToCapped(db, collName) {
@@ -54,9 +45,6 @@ var $config = (function() {
assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size));
assertWhenOwnDB(db[this.threadCollName].isCapped());
- if (!FeatureFlagUtil.isPresentAndEnabled(db, "CappedCollectionsRelaxedSize")) {
- assertWhenOwnDB(isMultiple256(db[this.threadCollName].stats().maxSize));
- }
// only the _id index should remain after running convertToCapped
var indexKeys = db[this.threadCollName].getIndexKeys();
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
index be4414f642dc6..91e1e3d35318f 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
@@ -15,10 +15,12 @@
*
* @tags: [requires_collstats, requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/convert_to_capped_collection.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.convertToCapped = function convertToCapped(db, collName) {
assertWhenOwnDB.commandWorked(db[this.threadCollName].createIndex({i: 1, rand: 1}));
assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 517941e97470c..f9f6229ffa709 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* count.js
*
@@ -13,7 +11,7 @@
*/
load("jstests/libs/fixture_helpers.js"); // For isMongos.
-var $config = (function() {
+export const $config = (function() {
var data = {
randRange: function randRange(low, high) {
// return random number in range [low, high]
diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js
index 9887d1a113ece..77aa22ab9db82 100644
--- a/jstests/concurrency/fsm_workloads/count_indexed.js
+++ b/jstests/concurrency/fsm_workloads/count_indexed.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* count_indexed.js
*
@@ -10,10 +8,10 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/count.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'count_fsm';
$config.data.shardKey = {tid: 1, i: 1};
diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js
index 99a2149cc1f91..931e0539164f2 100644
--- a/jstests/concurrency/fsm_workloads/count_limit_skip.js
+++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* count_limit_skip.js
*
@@ -10,11 +8,11 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
-load("jstests/libs/fixture_helpers.js"); // For isMongos.
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/count.js";
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'count_fsm_q_l_s';
$config.data.getCount = function getCount(db, predicate) {
diff --git a/jstests/concurrency/fsm_workloads/count_odd.js b/jstests/concurrency/fsm_workloads/count_odd.js
index 666f65fe66c51..abd99ca4dd5a2 100644
--- a/jstests/concurrency/fsm_workloads/count_odd.js
+++ b/jstests/concurrency/fsm_workloads/count_odd.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* count_odd.js
*
@@ -10,7 +8,7 @@
* ]
*
*/
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
function init(db, collName) {
}
diff --git a/jstests/concurrency/fsm_workloads/create_and_drop_collection.js b/jstests/concurrency/fsm_workloads/create_and_drop_collection.js
index fd23d5e153ec3..366fa95efc72a 100644
--- a/jstests/concurrency/fsm_workloads/create_and_drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_and_drop_collection.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_and_drop_collection.js
*
@@ -7,7 +5,7 @@
*
* @tags: [requires_sharding]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {};
var states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 942af8a823100..59af04e65eb0b 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_capped_collection.js
*
@@ -9,7 +7,7 @@
* @tags: [requires_capped]
*/
-var $config = (function() {
+export const $config = (function() {
// Returns a document of the form { _id: ObjectId(...), field: '...' }
// with specified BSON size.
function makeDocWithSize(targetSize) {
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index 3f435b9eb438a..f4dc6a11a29cb 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_capped_collection_maxdocs.js
*
@@ -14,10 +12,12 @@
*
* @tags: [does_not_support_stepdowns, requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/create_capped_collection.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
$config.data.prefix = 'create_capped_collection_maxdocs';
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index 61db8d1082460..2886c29fc6a03 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* create_collection.js
*
* Repeatedly creates a collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/create_collection_and_view.js b/jstests/concurrency/fsm_workloads/create_collection_and_view.js
index 29bcddee65f46..083fa6d0e185d 100644
--- a/jstests/concurrency/fsm_workloads/create_collection_and_view.js
+++ b/jstests/concurrency/fsm_workloads/create_collection_and_view.js
@@ -7,7 +7,7 @@
* @tags: [catches_command_failures, antithesis_incompatible]
*/
-var $config = (function() {
+export const $config = (function() {
const prefix = "create_collection_and_view";
// We'll use a single unique collection for all operations executed by this test. The
diff --git a/jstests/concurrency/fsm_workloads/create_database.js b/jstests/concurrency/fsm_workloads/create_database.js
index 332303f7100bd..0e4a8acf171c5 100644
--- a/jstests/concurrency/fsm_workloads/create_database.js
+++ b/jstests/concurrency/fsm_workloads/create_database.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_database.js
*
@@ -14,7 +12,7 @@
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+export const $config = (function() {
let data = {
checkCommandResult: function checkCommandResult(mayFailWithDatabaseDifferCase, res) {
if (mayFailWithDatabaseDifferCase)
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index 2fd28949bc3ec..67e7f72288a36 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_index_background.js
*
@@ -15,7 +13,7 @@
*/
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = (function() {
+export const $config = (function() {
var data = {
nDocumentsToSeed: 1000,
nDocumentsToCreate: 200,
@@ -68,7 +66,7 @@ var $config = (function() {
return coll.find({crud: {$exists: true}}).itcount() > 0;
}, 'No documents with "crud" field have been inserted or updated', 60 * 1000);
- let createOptions = {background: true};
+ let createOptions = {};
let filter = this.getPartialFilterExpression();
if (filter !== undefined) {
createOptions['partialFilterExpression'] = filter;
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
index f352ece062607..e8e78d6cd0ea6 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Executes the create_index_background.js workload, but with a partial filter expression on the
* indexed field.
@@ -9,10 +7,10 @@
* creates_background_indexes
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload.
-load('jstests/concurrency/fsm_workloads/create_index_background.js'); // For $config.
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/create_index_background.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
const fieldName = "isIndexed";
$config.data.getIndexSpec = function() {
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique.js b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
index 9f43db15b8843..6cd31867b3f31 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_index_background_unique.js
*
@@ -15,7 +13,8 @@
* ]
*/
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+
+export const $config = (function() {
var data = {
prefix: "create_index_background_unique_",
numDocsToLoad: 5000,
@@ -40,7 +39,7 @@ var $config = (function() {
const res = db.runCommand({
createIndexes: this.getCollectionNameForThread(this.tid),
- indexes: [{key: {x: 1}, name: "x_1", unique: true, background: true}]
+ indexes: [{key: {x: 1}, name: "x_1", unique: true}]
});
// Multi-statement Transactions can fail with SnapshotUnavailable if there are
// pending catalog changes as of the transaction start (see SERVER-43018).
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
index c1f0da117f25c..2c0677128a4cc 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* create_index_background_unique_capped.js
*
@@ -8,10 +6,12 @@
* @tags: [creates_background_indexes, requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/create_index_background_unique.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/create_index_background_unique.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = "create_index_background_unique_capped_";
$config.data.getCollectionOptions = function() {
// We create an 8MB capped collection, as it will comfortably fit the collection data
diff --git a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
index f3d5f972cb1cb..80e4f31c33c68 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Executes the create_index_background.js workload, but with a wildcard index.
*
@@ -8,10 +6,10 @@
* creates_background_indexes
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload.
-load('jstests/concurrency/fsm_workloads/create_index_background.js'); // For $config.
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/create_index_background.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getIndexSpec = function() {
return {"$**": 1};
};
diff --git a/jstests/concurrency/fsm_workloads/create_timeseries_collection.js b/jstests/concurrency/fsm_workloads/create_timeseries_collection.js
index c85d9946feb4d..8a2d32ceb42e7 100644
--- a/jstests/concurrency/fsm_workloads/create_timeseries_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_timeseries_collection.js
@@ -8,7 +8,7 @@
* does_not_support_stepdowns,
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {prefix: "create_timeseries_collection"};
var states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/dbhash_test.js b/jstests/concurrency/fsm_workloads/dbhash_test.js
new file mode 100644
index 0000000000000..a493f5569f55e
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/dbhash_test.js
@@ -0,0 +1,87 @@
+/**
+ * Tests dbHash collisions in WT with full validation.
+ * dbHash should not experience races on data, or EBUSY errors in the storage engine.
+ * @tags: [
+ * requires_wiredtiger,
+ * requires_replication,
+ * uses_full_validation,
+ * ]
+ */
+
+"use strict";
+
+load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js");
+
+export const dbPrefix = jsTestName() + '_db_';
+
+export const $config = (function() {
+ let states = {
+ init: function(db, collName) {
+ jsTestLog("init tid: " + this.tid);
+ },
+ dbHash: function(db, collName) {
+ jsTestLog("dbHash: " + db + "." + collName + " tid: " + this.tid);
+ let opTime =
+ assert
+ .commandWorked(db.runCommand(
+ {insert: collName, documents: [{x: 1}], writeConcern: {w: "majority"}}))
+ .operationTime;
+            jsTestLog("dbHash begin opTime:" + tojson(opTime));
+ let dbHashRes = assert.commandWorked(db.collName.runCommand(
+ {dbHash: 1, $_internalReadAtClusterTime: Timestamp(opTime['t'], opTime['i'])}));
+ jsTestLog("dbHash done" + dbHashRes.timeMillis);
+ },
+ fullValidation: function(db, collName) {
+ jsTestLog("fullValidation: " + db + "." + collName + " tid: " + this.tid);
+ let res = assert.commandWorked(db.collName.validate({full: true}));
+ jsTestLog("fullValidation done: " + db + "." + collName + " " + this.tid);
+ assert(res.valid);
+ },
+ };
+
+ const setSyncDelay = function(db, delay) {
+        jsTestLog("setSyncDelay: " + delay);
+ assert.commandWorked(db.adminCommand({setParameter: 1, syncdelay: delay}));
+ };
+
+ const setup = function(db, collName) {
+ jsTestLog("Creating:" + db + "." + collName + " tid: " + this.tid);
+ let x = 'x'.repeat(20 * 1024); // 20KB
+
+ let bulk = db.collName.initializeOrderedBulkOp();
+ for (let i = 0; i < 80; i++) {
+ bulk.insert({_id: x + i.toString()});
+ }
+ assertAlways.commandWorked(bulk.execute());
+
+ // Avoid filling the cache by flushing on a shorter interval
+ setSyncDelay(db, 10);
+
+ jsTestLog("Creating done:" + db + "." + collName);
+ };
+
+ const teardown = function(db, collName) {
+ setSyncDelay(db, 60);
+ };
+
+ const standardTransition = {
+ dbHash: 0.5,
+ fullValidation: 0.5,
+ };
+
+ const transitions = {
+ init: standardTransition,
+ dbHash: {dbHash: 0.8, fullValidation: 0.2},
+ fullValidation: {dbHash: 0.2, fullValidation: 0.8},
+ };
+
+ return {
+ threadCount: 5,
+ iterations: 2,
+ setup: setup,
+ states: states,
+ teardown: teardown,
+ transitions: transitions,
+ };
+})();
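
The transitions table in this new workload, like every other $config in these files, maps each source state to weighted next states. A sketch of reading those weights as probabilities, under the assumption that the FSM runner treats them as relative weights normalized per state (pickNextState is illustrative, not the runner's implementation):

Random.setRandomSeed();
function pickNextState(weightedTransitions) {
    const entries = Object.entries(weightedTransitions);
    const total = entries.reduce((sum, [, weight]) => sum + weight, 0);
    let r = Random.rand() * total;
    for (const [state, weight] of entries) {
        r -= weight;
        if (r <= 0) {
            return state;
        }
    }
    return entries[entries.length - 1][0];  // guard against floating-point rounding
}
// From the dbHash state: roughly 80% stay in dbHash, 20% move to fullValidation.
pickNextState({dbHash: 0.8, fullValidation: 0.2});
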
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index 2c56372c7d7b5..aaf5ca3f2ae1a 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* distinct.js
*
@@ -8,7 +6,7 @@
* Each thread operates on a separate collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {numDocs: 1000, prefix: 'distinct_fsm', shardKey: {i: 1}};
var states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index b2f2f69bcdd0d..e741abda56461 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* distinct_noindex.js
*
@@ -7,7 +5,7 @@
* The field contains non-unique values.
* Each thread operates on the same collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
randRange: function randRange(low, high) {
assertAlways.gt(high, low);
diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js
index cf287cdb2106f..633fb5c099b7f 100644
--- a/jstests/concurrency/fsm_workloads/distinct_projection.js
+++ b/jstests/concurrency/fsm_workloads/distinct_projection.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* distinct_projection.js
*
@@ -7,10 +5,10 @@
* The indexed field contains unique values.
* Each thread operates on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/distinct.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'distinct_projection_fsm';
$config.states.distinct = function distinct(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
index 950c9f3d5ed28..b6a5e3c0a7495 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* drop_collection.js
*
* Repeatedly creates and drops a collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
@@ -33,13 +31,5 @@ var $config = (function() {
var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}};
- // This test performs dropCollection concurrently from many threads, and dropCollection on a
- // sharded cluster takes a distributed lock. Since a distributed lock is acquired by repeatedly
- // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's
- // possible that some thread will be starved by the other threads and fail to grab the lock
- // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so
- // that threadCount * iterations < 40.
- // The threadCount and iterations can be increased once PM-697 ("Remove all usages of
- // distributed lock") is complete.
- return {threadCount: 5, iterations: 5, data: data, states: states, transitions: transitions};
+ return {threadCount: 10, iterations: 10, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/drop_collection_sharded.js b/jstests/concurrency/fsm_workloads/drop_collection_sharded.js
index 4f840353b653d..2cc36253521ec 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection_sharded.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection_sharded.js
@@ -7,8 +7,6 @@
* requires_sharding,
* ]
*/
-'use strict';
-
const dbPrefix = jsTestName() + '_DB_';
const dbCount = 2;
const collPrefix = 'sharded_coll_';
@@ -22,7 +20,7 @@ function getRandomCollection(db) {
return getRandomDb(db)[collPrefix + Random.randInt(collCount)];
}
-var $config = (function() {
+export const $config = (function() {
var setup = function(db, collName, cluster) {
// Initialize databases
for (var i = 0; i < dbCount; i++) {
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
index dcf72f8ec9611..8e7df183e9e19 100644
--- a/jstests/concurrency/fsm_workloads/drop_database.js
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* drop_database.js
*
@@ -8,7 +6,7 @@
* @tags: [
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName) {
this.uniqueDBName = db.getName() + 'drop_database' + this.tid;
diff --git a/jstests/concurrency/fsm_workloads/drop_database_sharded.js b/jstests/concurrency/fsm_workloads/drop_database_sharded.js
index ece356a23e0b2..41cec6cde8a02 100644
--- a/jstests/concurrency/fsm_workloads/drop_database_sharded.js
+++ b/jstests/concurrency/fsm_workloads/drop_database_sharded.js
@@ -5,8 +5,6 @@
* requires_sharding,
* ]
*/
-'use strict';
-
const dbPrefix = jsTestName() + '_DB_';
const dbCount = 2;
const collPrefix = 'sharded_coll_';
@@ -47,7 +45,7 @@ function getDropDbStateResults(db) {
return {ok: countOK, notOK: countNotOK};
}
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
function init(db, collName) {
}
diff --git a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js
index 6b604abdc81c2..99bf723dc0f5c 100644
--- a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js
+++ b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Repeatedly creates and drops a database in concurrency with FCV changes
*
@@ -10,10 +8,10 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/drop_database_sharded.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/drop_database_sharded.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.setFCV = function(db, collName) {
const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV];
const targetFCV = fcvValues[Random.randInt(3)];
diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
index 05b15c755ffbd..3e679be8869b8 100644
--- a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
+++ b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* drop_index_during_lookup.js
*
 * Sets up a situation where the index join strategy will be chosen for $lookup while running
* concurrent dropIndexes against the index chosen for the foreign side.
*/
-var $config = (function() {
+export const $config = (function() {
let data = {
collName: 'localColl',
foreignCollName: 'foreignColl',
diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
index f4f68e0ffb36f..36b9406308b52 100644
--- a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
+++ b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* drop_index_during_replan.js
*
@@ -8,7 +6,7 @@
* time, other threads may be dropping {b: 1}. This tests that the replanning process is robust to
* index drops.
*/
-var $config = (function() {
+export const $config = (function() {
let data = {
collName: 'drop_index_during_replan',
indexSpecs: [
diff --git a/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js
index d5b17b67b0740..5883d35b789b5 100644
--- a/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js
@@ -8,8 +8,6 @@
* requires_fcv_51,
* ]
*/
-'use strict';
-
const dbPrefix = 'fsm_db_for_sharded_timeseries_collection_';
const dbCount = 2;
const collPrefix = 'sharded_timeseries_collection_';
@@ -17,8 +15,6 @@ const collCount = 2;
const timeField = 'time';
const metaField = 'hostId';
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
-
function getRandomDb(db) {
return db.getSiblingDB(dbPrefix + Random.randInt(dbCount));
}
@@ -27,23 +23,8 @@ function getRandomTimeseriesView(db) {
return getRandomDb(db)[collPrefix + Random.randInt(collCount)];
}
-var $config = (function() {
+export const $config = (function() {
const setup = function(db, collName, cluster) {
- // Check that necessary feature flags are enabled on each of the mongods.
- let isEnabled = true;
- cluster.executeOnMongodNodes(function(db) {
- if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(db)) {
- isEnabled = false;
- }
- });
- this.isShardedTimeseriesEnabled = isEnabled;
-
- if (!this.isShardedTimeseriesEnabled) {
- jsTestLog(
- "Feature flags for sharded time-series collections are not enabled. This test will do nothing.");
- return;
- }
-
// Enable sharding for the test databases.
for (var i = 0; i < dbCount; i++) {
const dbName = dbPrefix + i;
@@ -54,10 +35,6 @@ var $config = (function() {
const states = {
init: function(db, collName) {},
create: function(db, collName) {
- if (!this.isShardedTimeseriesEnabled) {
- return;
- }
-
const coll = getRandomTimeseriesView(db);
jsTestLog("Executing create state on: " + coll.getFullName());
assertAlways.commandWorked(db.adminCommand({
@@ -67,10 +44,6 @@ var $config = (function() {
}));
},
dropView: function(db, collName) {
- if (!this.isShardedTimeseriesEnabled) {
- return;
- }
-
const coll = getRandomTimeseriesView(db);
jsTestLog("Executing dropView state on: " + coll.getFullName());
assertAlways.commandWorked(coll.getDB().runCommand({drop: coll.getName()}));
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
index 135e7720bee10..b666bacfbaceb 100644
--- a/jstests/concurrency/fsm_workloads/explain.js
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* explain.js
*
@@ -7,9 +5,9 @@
*
*/
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
-load('jstests/libs/analyze_plan.js');
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
-var $config = (function() {
+export const $config = (function() {
var data = {
collNotExist: 'donotexist__',
nInserted: 0,
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
index b59524c2abb0a..44f2a4762df35 100644
--- a/jstests/concurrency/fsm_workloads/explain_aggregate.js
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* explain_aggregate.js
*
* Runs explain() and aggregate() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
function assertCursorStages(num, obj) {
assertAlways(obj.stages, tojson(obj));
assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
index fe5a71e3dfbb1..5eb5ac98cced6 100644
--- a/jstests/concurrency/fsm_workloads/explain_count.js
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -1,16 +1,14 @@
-'use strict';
-
/**
* explain_count.js
*
* Runs explain() and count() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+import {planHasStage} from "jstests/libs/analyze_plan.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
function assertNCounted(num, obj, db) {
var stage = obj.executionStats.executionStages;
// get sharded stage(s) if counting on mongos
diff --git a/jstests/concurrency/fsm_workloads/explain_distinct.js b/jstests/concurrency/fsm_workloads/explain_distinct.js
index 2786a7f80ecd0..6d8a66c464c64 100644
--- a/jstests/concurrency/fsm_workloads/explain_distinct.js
+++ b/jstests/concurrency/fsm_workloads/explain_distinct.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* explain_distinct.js
*
* Runs explain() and distinct() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
+import {planHasStage} from "jstests/libs/analyze_plan.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states = Object.extend({
explainBasicDistinct: function(db, collName) {
var res = db[collName].explain().distinct('i');
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
index 65378c9ac9009..dc8ae4e921c60 100644
--- a/jstests/concurrency/fsm_workloads/explain_find.js
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -1,16 +1,14 @@
-'use strict';
-
/**
* explain_find.js
*
* Runs explain() and find() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
+import {isIxscan, planHasStage} from "jstests/libs/analyze_plan.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states = Object.extend({
explainLimit: function explainLimit(db, collName) {
var res = db[collName].find().limit(3).explain();
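
With analyze_plan.js consumed as a module, each explain workload imports only the helpers it uses (getWinningPlan, planHasStage, isIxscan) instead of receiving them as globals from load(). A small usage sketch; the collection name is illustrative and unsharded explain output is assumed:

import {getWinningPlan, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js";

const coll = db.example_coll;
const explain = coll.find({a: 1}).limit(3).explain();
const winningPlan = getWinningPlan(explain.queryPlanner);
// Inspect the chosen plan shape, as explain_find.js does for LIMIT and IXSCAN stages.
const usedLimit = planHasStage(db, winningPlan, "LIMIT");
const usedIndex = isIxscan(db, winningPlan);
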
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
index fe47554a83fa6..6465c14430963 100644
--- a/jstests/concurrency/fsm_workloads/explain_remove.js
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -1,14 +1,12 @@
-'use strict';
-
/**
* explain_remove.js
*
* Runs explain() and remove() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states = Object.extend({
explainSingleRemove: function explainSingleRemove(db, collName) {
var res = db[collName]
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
index 5176f8d6c6267..91ff4894d79ff 100644
--- a/jstests/concurrency/fsm_workloads/explain_update.js
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* explain_update.js
*
* Runs explain() and update() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js";
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states = Object.extend({
explainBasicUpdate: function explainBasicUpdate(db, collName) {
var res =
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js b/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js
deleted file mode 100644
index cfd2871737f70..0000000000000
--- a/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js
+++ /dev/null
@@ -1,163 +0,0 @@
-'use strict';
-
-/**
- * Each thread uses its own LSID and performs `findAndModify`s with retries on documents while the
- * `storeFindAndModifyImagesInSideCollection` server parameter gets flipped.
- *
- * @tags: [requires_replication, requires_non_retryable_commands, uses_transactions];
- */
-var $config = (function() {
- var data = {
- numDocs: 100,
- };
-
- var states = (function() {
- function init(db, collName) {
- this._lastTxnId = 0;
- this._lsid = UUID();
- }
-
- function findAndModifyUpsert(db, collName) {
- // `auto_retry_transactions` is not compatible with explicitly testing retryable writes.
- // This avoids issues regarding the multi_stmt tasks.
- fsm.forceRunningOutsideTransaction(this);
-
- this._lastTxnId += 1;
- this._lastCmd = {
- findandmodify: collName,
- lsid: {id: this._lsid},
- txnNumber: NumberLong(this._lastTxnId),
- stmtId: NumberInt(1),
- query: {_id: Math.round(Math.random() * this.numDocs)},
- new: Math.random() > 0.5,
- upsert: true,
- update: {$inc: {counter: 1}},
- };
- this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd));
- }
-
- function findAndModifyUpdate(db, collName) {
- // `auto_retry_transactions` is not compatible with explicitly testing retryable writes.
- // This avoids issues regarding the multi_stmt tasks.
- fsm.forceRunningOutsideTransaction(this);
-
- this._lastTxnId += 1;
- this._lastCmd = {
- findandmodify: collName,
- lsid: {id: this._lsid},
- txnNumber: NumberLong(this._lastTxnId),
- stmtId: NumberInt(1),
- query: {_id: Math.round(Math.random() * this.numDocs)},
- new: Math.random() > 0.5,
- upsert: false,
- update: {$inc: {counter: 1}},
- };
- this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd));
- }
-
- function findAndModifyDelete(db, collName) {
- // `auto_retry_transactions` is not compatible with explicitly testing retryable writes.
- // This avoids issues regarding the multi_stmt tasks.
- fsm.forceRunningOutsideTransaction(this);
-
- this._lastTxnId += 1;
- this._lastCmd = {
- findandmodify: collName,
- lsid: {id: this._lsid},
- txnNumber: NumberLong(this._lastTxnId),
- stmtId: NumberInt(1),
- query: {_id: Math.round(Math.random() * this.numDocs)},
- // Deletes may not ask for the postImage
- new: false,
- remove: true,
- };
- this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd));
- }
-
- function findAndModifyRetry(db, collName) {
- // `auto_retry_transactions` is not compatible with explicitly testing retryable writes.
- // This avoids issues regarding the multi_stmt tasks.
- fsm.forceRunningOutsideTransaction(this);
-
- assert(this._lastCmd);
- assert(this._lastResponse);
-
- let response = assert.commandWorked(db.runCommand(this._lastCmd));
- let debugMsg = {
- "TID": this.tid,
- "LastCmd": this._lastCmd,
- "LastResponse": this._lastResponse,
- "Response": response
- };
- assert.eq(this._lastResponse.hasOwnProperty("lastErrorObject"),
- response.hasOwnProperty("lastErrorObject"),
- debugMsg);
- if (response.hasOwnProperty("lastErrorObject") &&
- // If the original command affected `n=1` document, all retries must return
- // identical results. If an original command receives `n=0`, then a retry may find a
- // match and return `n=1`. Only compare `lastErrorObject` and `value` when retries
- // must be identical.
- this._lastResponse["lastErrorObject"].n === 1) {
- assert.eq(
- this._lastResponse["lastErrorObject"], response["lastErrorObject"], debugMsg);
- }
- assert.eq(this._lastResponse.hasOwnProperty("value"),
- response.hasOwnProperty("value"),
- debugMsg);
- if (response.hasOwnProperty("value") && this._lastResponse["lastErrorObject"].n === 1) {
- assert.eq(this._lastResponse["value"], response["value"], debugMsg);
- }
-
- // Have all workers participate in creating some chaos.
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- storeFindAndModifyImagesInSideCollection: Math.random() > 0.5,
- }));
- }
-
- return {
- init: init,
- findAndModifyUpsert: findAndModifyUpsert,
- findAndModifyUpdate: findAndModifyUpdate,
- findAndModifyDelete: findAndModifyDelete,
- findAndModifyRetry: findAndModifyRetry
- };
- })();
-
- var transitions = {
- init: {findAndModifyUpsert: 1.0},
- findAndModifyUpsert: {
- findAndModifyRetry: 3.0,
- findAndModifyUpsert: 1.0,
- findAndModifyUpdate: 1.0,
- findAndModifyDelete: 1.0
- },
- findAndModifyUpdate: {
- findAndModifyRetry: 3.0,
- findAndModifyUpsert: 1.0,
- findAndModifyUpdate: 1.0,
- findAndModifyDelete: 1.0
- },
- findAndModifyDelete: {
- findAndModifyRetry: 3.0,
- findAndModifyUpsert: 1.0,
- findAndModifyUpdate: 1.0,
- findAndModifyDelete: 1.0
- },
- findAndModifyRetry: {
- findAndModifyRetry: 1.0,
- findAndModifyUpsert: 1.0,
- findAndModifyUpdate: 1.0,
- findAndModifyDelete: 1.0
- },
- };
-
- return {
- threadCount: 10,
- iterations: 100,
- data: data,
- states: states,
- transitions: transitions,
- setup: function() {},
- };
-})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 4b823bfb9c335..489694f3b165e 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_inc.js
*
@@ -14,7 +12,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
let data = {
getUpdateArgument: function(fieldName) {
return {$inc: {[fieldName]: 1}};
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js b/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js
index c275ac35021e1..05423a687baa4 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* findAndModify_inc_pipeline.js
*
* This is the same workload as findAndModify_inc.js, but substitutes a $mod-style update with a
* pipeline-style one.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_inc.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_inc.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getUpdateArgument = function getUpdateArgument(fieldName) {
return [{$addFields: {[fieldName]: {$add: ["$" + fieldName, 1]}}}];
};
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
index 4faefca2fd11f..e1cdfd33173a4 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_mixed_queue_unindexed.js
*
@@ -15,13 +13,13 @@
*
* This workload was designed to reproduce SERVER-21434.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
-
-// For isMongod.
-load('jstests/concurrency/fsm_workload_helpers/server_types.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js";
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod.
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
$config.data.uniqueDBName = 'findAndModify_mixed_queue_unindexed';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index 3c20a7ae61f1b..d1a2902ec8264 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* findAndModify_remove.js
*
* Each thread repeatedly inserts a document, and subsequently performs
* the findAndModify command to remove it.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {shardKey: {tid: 1}};
var states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index 68796a2000a4c..25fcf95421612 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_remove_queue.js
*
@@ -14,7 +12,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
index 981568904ad42..5a86dd399250d 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_remove_queue_unindexed.js
*
@@ -13,10 +11,12 @@
*
* This workload was designed to reproduce SERVER-21434.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the database name, since the workload
// name is assumed to be unique.
$config.data.uniqueDBName = 'findAndModify_remove_queue_unindexed';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index e97281b5a8a94..7e7f72bddeaab 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_update.js
*
@@ -8,7 +6,7 @@
* selected based on 'query' and 'sort' specifications, and updated
* using either the $min or $max operator.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
numDocsPerThread: 3, // >1 for 'sort' to be meaningful
shardKey: {tid: 1}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index aba96be9648c4..7dec62d6bcf5c 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_update_collscan.js
*
@@ -10,10 +8,10 @@
*
* Attempts to force a collection scan by not creating an index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_update.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Do not create the { tid: 1, value: 1 } index so that a
// collection
// scan is performed for the query and sort operations.
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index 0eebb29cb5904..e068fa8c6bd3b 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_update_grow.js
*
@@ -10,7 +8,7 @@
*/
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
-var $config = (function() {
+export const $config = (function() {
var data = {
shardKey: {tid: 1},
};
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
index 6937756ed0f76..398fb5a912063 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_update_queue.js
*
@@ -11,13 +9,13 @@
* This workload was designed to reproduce an issue similar to SERVER-18304 for update operations
* using the findAndModify command where the old version of the document is returned.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
-
-// For isMongod.
-load('jstests/concurrency/fsm_workload_helpers/server_types.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js";
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod.
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
$config.data.uniqueDBName = 'findAndModify_update_queue';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
index c70b80058f0d2..d7384994104f4 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_update_queue_unindexed.js
*
@@ -13,10 +11,12 @@
*
* This workload was designed to reproduce SERVER-21434.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_update_queue.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/findAndModify_update_queue.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the database name, since the workload
// name is assumed to be unique.
$config.data.uniqueDBName = 'findAndModify_update_queue_unindexed';
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
index e9b06e7afb72e..b12b83380b109 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_upsert.js
*
@@ -8,7 +6,7 @@
* created) based on the 'query' specification, and updated using the
* $push operator.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {sort: false, shardKey: {tid: 1}};
var states = (function() {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
index 0cbfbd3ab2145..fb6f5ced90df8 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* findAndModify_upsert_collscan.js
*
@@ -10,10 +8,10 @@
*
* Forces 'sort' to perform a collection scan by using $natural.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_upsert.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.sort = {$natural: 1};
return $config;
diff --git a/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js b/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js
index 901b45520001e..b48bb62c9e5f7 100644
--- a/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js
+++ b/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* This test verifies that neither index creation nor find cmd operation on a time-series collection
* leads to incorrect data results.
@@ -17,7 +15,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
// Hardcode time-series collection information so that the threads can all obtain it and run on
// the same fields and indexes.
const timeFieldName = "tm";
diff --git a/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js b/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js
index 32c9352a62510..dc1c90061c03d 100644
--- a/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js
+++ b/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Sets the internalQueryFrameworkControl flag to "forceClassicEngine" and "trySbeEngine", and
* asserts that find queries using the plan cache produce the correct results.
@@ -11,7 +9,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
let data = {originalParamValues: {}};
function getCollectionName(collName) {
diff --git a/jstests/concurrency/fsm_workloads/global_index.js b/jstests/concurrency/fsm_workloads/global_index.js
index e41a5f77a99c4..e5f0b6ecd92fb 100644
--- a/jstests/concurrency/fsm_workloads/global_index.js
+++ b/jstests/concurrency/fsm_workloads/global_index.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform global index CRUD operations, with create and drop commands.
*
@@ -9,7 +7,7 @@
* requires_replication
* ]
*/
-var $config = (function() {
+export const $config = (function() {
const data = {
uuidArr: ["47b5c083-8d60-4920-90e2-ba3ff668c371", "8acc9ba2-2d8f-4b01-b835-8f1818c1df1c"],
range: 5
diff --git a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
index 42e4abfa3e2b1..91b56e5d92bdc 100644
--- a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
+++ b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs a variety commands which need to interact with the global cursor manager. This test was
* designed to reproduce SERVER-33959.
@@ -7,10 +5,12 @@
* The "grandparent test," invalidated_cursors.js, uses $currentOp.
* @tags: [uses_curop_agg_stage, state_functions_share_cursor]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.listCollections = function listCollections(unusedDB, _) {
const db = unusedDB.getSiblingDB(this.uniqueDBName);
const cmdRes =
diff --git a/jstests/concurrency/fsm_workloads/index_build_abort.js b/jstests/concurrency/fsm_workloads/index_build_abort.js
new file mode 100644
index 0000000000000..bbf58ab93e9df
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/index_build_abort.js
@@ -0,0 +1,145 @@
+/**
+ * Build indexes with different abort causes.
+ * - Indexing error.
+ * - Abort due to dropIndexes.
+ * - Abort due to killOp on primary.
+ *
+ * Abort due to DiskSpaceMonitor is not tested as it would interfere with other concurrent tests
+ * creating index builds. Similarly, killOp on secondary nodes is not tested as it can result in a
+ * node crash, interfering with other tests.
+ *
+ * @tags: [
+ * creates_background_indexes,
+ * # The test uses $currentOp, which is not supported in transactions.
+ * does_not_support_transactions,
+ * requires_fcv_71,
+ * requires_replication
+ * ]
+ */
+
+load("jstests/libs/fail_point_util.js");
+load("jstests/noPassthrough/libs/index_build.js");
+
+export const $config = (function() {
+ const data = {
+ prefix: "index_build_abort_",
+ nCollections: 3,
+ nDocuments: 25000,
+ expectedErrorCodes: [ErrorCodes.IndexBuildAborted, ErrorCodes.Interrupted, 13026],
+ mutexColl: "index_build_abort_mutexes",
+ };
+
+ function randInt(max) {
+ return Math.floor(Math.random() * max);
+ }
+
+ function getRandCollectionName() {
+ return data.prefix + randInt(data.nCollections);
+ }
+
+ function getCollMutexName(collName) {
+ return collName + "_mutex";
+ }
+
+ function mutexTryLock(db, collName) {
+ const collMutex = getCollMutexName(collName);
+ let doc = db[data.mutexColl].findAndModify(
+ {query: {mutex: collMutex, locked: 0}, update: {$set: {locked: 1}}});
+ if (doc === null) {
+ return false;
+ }
+ return true;
+ }
+
+ function mutexUnlock(db, collName) {
+ const collMutex = getCollMutexName(collName);
+ db[data.mutexColl].update({mutex: collMutex}, {$set: {locked: 0}});
+ }
+
+ const states = {
+ dropCollAndCreateIndexBuild: function dropCollAndCreateIndexBuild(db, collName) {
+ const randomColl = getRandCollectionName();
+ var coll = db[randomColl];
+ if (mutexTryLock(db, randomColl)) {
+ try {
+                    // Dropping the collection outside the lock, so the drop could run concurrently
+                    // with an index build, might be more interesting, but it would also allow a drop
+                    // in the middle of the bulk insert, or before createIndexes starts.
+ coll.drop();
+ var bulk = coll.initializeUnorderedBulkOp();
+ const failDocumentIndex = randInt(this.nDocuments);
+ for (var i = 0; i < this.nDocuments; ++i) {
+ if (failDocumentIndex == i) {
+ bulk.insert({a: [0, "a"]});
+ } else {
+ bulk.insert({a: [0, 0]});
+ }
+ }
+ let bulkRes = bulk.execute();
+ assertAlways.commandWorked(bulkRes);
+ assertAlways.eq(this.nDocuments, bulkRes.nInserted, tojson(bulkRes));
+ assertAlways.commandFailedWithCode(coll.createIndexes([{a: "2d"}]),
+ this.expectedErrorCodes);
+ } finally {
+ mutexUnlock(db, randomColl);
+ }
+ }
+ },
+ dropIndexes: function dropIndexes(db, collName) {
+ assertAlways.commandWorkedOrFailedWithCode(
+ db.runCommand({dropIndexes: getRandCollectionName(), index: "*"}),
+ ErrorCodes.NamespaceNotFound);
+ },
+ killOpIndexBuild: function killOpIndexBuild(db, collName) {
+ let nTry = 0;
+ while (nTry++ < 10) {
+ try {
+ const opId = IndexBuildTest.getIndexBuildOpId(db, getRandCollectionName());
+ if (opId != -1) {
+ db.killOp(opId);
+ break;
+ }
+ } catch (e) {
+ jsTestLog("Suppressed exception during killOp attempt: " + e);
+ }
+ }
+ }
+ };
+
+    const transitionToAllStates = {
+        dropCollAndCreateIndexBuild: 1,
+        dropIndexes: 1,
+        killOpIndexBuild: 1,
+    };
+    const transitions = {
+        dropCollAndCreateIndexBuild: transitionToAllStates,
+        dropIndexes: transitionToAllStates,
+        killOpIndexBuild: transitionToAllStates
+ };
+
+ const setup = function(db, collName, cluster) {
+ for (let coll = 0; coll < this.nCollections; ++coll) {
+ const mutexName = getCollMutexName(data.prefix + coll);
+ db[data.mutexColl].insert({mutex: mutexName, locked: 0});
+ }
+ };
+
+ const teardown = function(db, collName, cluster) {
+ for (let coll = 0; coll < this.nCollections; ++coll) {
+ const collName = data.prefix + coll;
+ db[collName].drop();
+ db[getCollMutexName(collName)].drop();
+ }
+ };
+
+ return {
+ threadCount: 12,
+ iterations: 200,
+ startState: 'dropCollAndCreateIndexBuild',
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ teardown: teardown,
+ data: data,
+ };
+})();
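
For reference, the per-collection "mutex" used by the new index_build_abort.js workload above is a findAndModify-based test-and-set on a dedicated mutex collection. A minimal standalone sketch of that pattern (the collection and mutex names here are illustrative, not part of the patch):

    // Acquire: atomically flip 'locked' from 0 to 1. findAndModify returns null when no
    // unlocked document matched, i.e. another thread already holds the mutex.
    function tryLock(db, mutexColl, mutexName) {
        const doc = db[mutexColl].findAndModify(
            {query: {mutex: mutexName, locked: 0}, update: {$set: {locked: 1}}});
        return doc !== null;
    }

    // Release: clear the flag so other threads can acquire the mutex again.
    function unlock(db, mutexColl, mutexName) {
        db[mutexColl].update({mutex: mutexName}, {$set: {locked: 0}});
    }

    if (tryLock(db, "index_build_abort_mutexes", "index_build_abort_0_mutex")) {
        try {
            // ... drop the collection, bulk insert, and createIndexes go here ...
        } finally {
            unlock(db, "index_build_abort_mutexes", "index_build_abort_0_mutex");
        }
    }
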
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index b264b6561a5dd..a6c7dfb49decf 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_1char.js
*
@@ -7,10 +5,10 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a 1-character string based on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_1char';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
index 1a8165cf82ed0..161261cab54cf 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* indexed_insert_1char_noindex.js
*
* Executes the indexed_insert_1char.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_1char.js";
load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
index d4ec75e992d78..91f35686c39c5 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_2d.js
*
@@ -7,10 +5,10 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2d index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_2d';
// Remove the shard key for 2d indexes, as they are not supported
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
index 20ac7b4b5883a..9cd60c6d7b9b1 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_2dsphere.js
*
@@ -7,10 +5,10 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2dsphere index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_2d.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_2dsphere';
$config.data.getIndexSpec = function getIndexSpec() {
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 6b8649b4dbe65..b5d719b8d41ff 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_base.js
*
@@ -7,7 +5,7 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is the thread's id.
*/
-var $config = (function() {
+export const $config = (function() {
function makeSortSpecFromIndexSpec(ixSpec) {
var sort = {};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
index e03d5359ab103..a5f508e3c18c0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
@@ -1,16 +1,14 @@
-'use strict';
-
/**
* indexed_insert_base_capped.js
*
* Executes the indexed_insert_base.js workload on a capped collection.
* @tags: [requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
-var $config = extendWorkload($config, makeCapped);
+export const $config = extendWorkload($baseConfig, makeCapped);
// Remove the shard key for capped collections, as they cannot be sharded
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
index 5e4a2f0f6099a..1d4ec58b1e21b 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* indexed_insert_base_noindex.js
*
* Executes the indexed_insert_base.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
index f8b44cfd97819..95808e9090f87 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_compound.js
*
@@ -7,10 +5,10 @@
* appear in both a collection scan and an index scan. The collection is indexed
* with a compound index on three different fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.init = function init(db, collName) {
$super.states.init.apply(this, arguments);
};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
index 5306facb834b2..48dbcecedc75f 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_heterogeneous.js
*
@@ -7,10 +5,10 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a different BSON type, depending on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_heterogeneous';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
index f8adab70ffc0d..73308989fc86c 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
@@ -1,12 +1,12 @@
-'use strict';
-
/**
* indexed_insert_heterogeneous_noindex.js
*
* Executes the indexed_insert_heterogeneous.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js";
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
index 55dd1daf4dc26..cc87a318059e4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_large.js
*
@@ -8,10 +6,10 @@
* value is a string large enough to make the whole index key be 1K, which is
* the maximum.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_large';
// Remove the shard key, since it cannot be greater than 512 bytes
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
index 625de8a387ee0..02c99f0b94550 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* indexed_insert_large_noindex.js
*
* Executes the indexed_insert_large.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_large.js";
load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
index 06e92f5907fbd..c1373ab08932a 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_long_fieldname.js
*
@@ -7,10 +5,10 @@
* documents appear in both a collection scan and an index scan. The indexed
* field name is a long string.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
     // The indexedField must be limited such that the namespace and indexedField do not
     // exceed 128 characters.  The namespace defaults to "test<i>_fsmdb<j>.fsmcoll<k>",
     // where i, j & k are increasing integers for each test, workload and thread.
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
index f8960c40b4d02..e9619b11842c2 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
@@ -1,12 +1,12 @@
-'use strict';
-
/**
* indexed_insert_long_fieldname_noindex.js
*
* Executes the indexed_insert_long_fieldname.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js";
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
index 4343de2d2bea9..d938c0bac7c76 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_multikey.js
*
@@ -7,10 +5,10 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is an array of numbers.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_multikey';
// Remove the shard key, since it cannot be a multikey index
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
index 8995d209a47d8..459a786a8dfcf 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* indexed_insert_multikey_noindex.js
*
* Executes the indexed_insert_multikey.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_multikey.js";
load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
-var $config = extendWorkload($config, indexedNoindex);
+export const $config = extendWorkload($baseConfig, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index 148c77edbfb87..c65bdc71dcda0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_ordered_bulk.js
*
@@ -8,10 +6,10 @@
*
* Uses an ordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_ordered_bulk';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index 0e2a815f8dbc4..2ac6deed0a4d0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* indexed_insert_text.js
*
* Inserts some documents into a collection with a text index.
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName) {
// noop
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
index a665b7e61acee..5c197fc967445 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
@@ -1,14 +1,12 @@
-'use strict';
-
/**
* indexed_insert_text_multikey.js
*
* like indexed_insert_text.js but the indexed value is an array of strings
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_text.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.init = function init(db, collName) {
$super.states.init.apply(this, arguments);
};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index 12a7529e1108b..375fee0c5215e 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_ttl.js
*
@@ -12,7 +10,7 @@
load('jstests/concurrency/fsm_workload_helpers/balancer.js');
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName) {
var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index e8fe91483d425..09ce0918d4831 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_unordered_bulk.js
*
@@ -8,10 +6,10 @@
*
* Uses an unordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_unordered_bulk';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
index 627fad30832c4..f3a9fee78a13d 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_upsert.js
*
@@ -10,10 +8,10 @@
* Instead of inserting via coll.insert(), this workload inserts using an
* upsert.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.indexedField = 'indexed_insert_upsert';
$config.data.shardKey = {};
$config.data.shardKey[$config.data.indexedField] = 1;
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index 4cd751c1e1ccc..05a5f3bff1f27 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* indexed_insert_where.js
*
@@ -8,7 +6,7 @@
* data.insertedDocuments is used as a counter by all of those workloads for their own checks.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
documentsToInsert: 100,
insertedDocuments: 0,
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
index 05cd8fa8494e3..178381951cca6 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* indexed_insert_wildcard.js
*
* Inserts documents into an indexed collection and asserts that the documents appear in both a
* collection scan and an index scan. The collection is indexed with a wildcard index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload().
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // For $config().
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.setup = function init(db, collName) {
$super.setup.apply(this, arguments);
};
diff --git a/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js b/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js
index 5ca0c603b9ae9..e6d155c3821f9 100644
--- a/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js
+++ b/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js
@@ -11,7 +11,7 @@
"use strict";
-var $config = (function() {
+export const $config = (function() {
const initData = {
getCollectionName: function(collName) {
return "insert_duplicates_unique_index_" + collName;
diff --git a/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js b/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js
index e916c366f457b..49f3d77054cfd 100644
--- a/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js
+++ b/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* insert_ttl_retry_writes_timeseries.js
*
@@ -13,10 +11,10 @@
* uses_ttl,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getCollectionName = function getCollectionName(collName) {
return "insert_ttl_retry_writes_timeseries_" + collName;
};
diff --git a/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js b/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js
index 891df4042c34e..d16d80975631f 100644
--- a/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js
+++ b/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Creates a time-series collection with a short expireAfterSeconds value. Each thread does an
* insert on each iteration with a time, a metadata field, 'tid', and random measurement, 'data'. At
@@ -15,7 +13,7 @@
load('jstests/concurrency/fsm_workload_helpers/balancer.js');
-var $config = (function() {
+export const $config = (function() {
const initData = {
getCollectionName: function(collName) {
return "insert_ttl_timeseries_" + collName;
diff --git a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
index b7b4fb6806b53..94859ac588562 100644
--- a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
+++ b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* - Shard several collections with different (random) configured maxChunkSize
* - Perform continuous inserts of random amounts of data into the collections
@@ -31,7 +29,7 @@ function getRandomCollName(tid) {
return collNames[Random.randInt(tid * tid) % collNames.length];
}
-var $config = (function() {
+export const $config = (function() {
let states = {
/*
* Insert into a test collection a random amount of documents (up to 10MB per iteration)
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js b/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js
index cefe90d143b8a..5b59a9bcdd07a 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands in internal transactions using all the
* available client session settings, and occasionally kills a random session.
@@ -11,12 +9,14 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js";
load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession
-load('jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js');
load('jstests/libs/override_methods/retry_on_killed_session.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.retryOnKilledSession = true;
// Insert initial documents during setup instead of the init state, otherwise the insert could
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js b/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js
index 457124bfdaf09..e9ed5551da169 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands against a sharded collection inside
* single-shard and cross-shard internal transactions using all the available client session
@@ -14,10 +12,12 @@
* does_not_support_config_fuzzer,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.transitions = {
init: {
moveChunk: 0.2,
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
index 949b5cf32d3e6..9423e791a150b 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands against a sharded collection inside
* single-shard and cross-shard internal transactions using all client session configurations, and
@@ -12,12 +10,13 @@
* antithesis_incompatible,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js";
load('jstests/libs/fail_point_util.js');
-load("jstests/libs/feature_flag_util.js");
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// reshardingMinimumOperationDurationMillis is set to 30 seconds when there are stepdowns.
// So in order to limit the overall time for the test, we limit the number of resharding
// operations to maxReshardingExecutions.
@@ -87,13 +86,6 @@ var $config = extendWorkload($config, function($config, $super) {
}
assert(res.hasOwnProperty("code"));
- if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- // Expected error prior to the PointInTimeCatalogLookups project.
- if (res.code === ErrorCodes.SnapshotUnavailable) {
- return true;
- }
- }
-
// Race to retry.
if (res.code === ErrorCodes.ReshardCollectionInProgress) {
return false;
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js
index 0ba8b429730d1..eb4cb39d04415 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands against a sharded collection inside
* single-shard and cross-shard internal transactions using all the available client session
@@ -12,13 +10,19 @@
* antithesis_incompatible,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
-load('jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ extendWithInternalTransactionsUnsharded
+} from "jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js";
+import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/random_moveChunk_base.js';
load('jstests/concurrency/fsm_workload_helpers/balancer.js');
load('jstests/libs/fail_point_util.js');
-var $config = extendWorkload($config, function($config, $super) {
+const parsedBaseConfig = parseConfig($baseConfig);
+const $extendedBaseConfig = extendWithInternalTransactionsUnsharded(
+ Object.extend({}, parsedBaseConfig, true), parsedBaseConfig);
+
+export const $config = extendWorkload($extendedBaseConfig, function($config, $super) {
$config.data.getQueryForDocument = function getQueryForDocument(doc) {
// The query for a write command against a sharded collection must contain the shard key.
const query = $super.data.getQueryForDocument.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js
index f2ae41e3ac5c0..bc3acb7170517 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands against a sharded collection inside
* single-shard and cross-shard internal transactions started on a shard using all the available
@@ -12,11 +10,13 @@
* antithesis_incompatible
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js";
load('jstests/libs/fixture_helpers.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.expectDirtyDocs = {
// The client is either not using a session or is using a session without retryable writes
// enabled. Therefore, when a write is interrupted due to stepdown/kill/terminate, they
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js
index 013850a86d6ff..2d1e74b684e13 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands against a sharded collection inside
* single-shard and cross-shard internal transactions started on a shard using all the available
@@ -14,11 +12,13 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js";
load('jstests/libs/override_methods/retry_on_killed_session.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.retryOnKilledSession = true;
// Insert initial documents during setup instead of the init state, otherwise the insert could
diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
index 383de45585657..b87dc3fad104f 100644
--- a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
+++ b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs insert, update, delete and findAndModify commands in internal transactions using all the
* available client session settings. This workload works on both standalone replica sets and
@@ -14,29 +12,14 @@
* assumes_unsharded_collection,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
load("jstests/libs/override_methods/retry_writes_at_least_once.js");
// This workload involves running commands outside a session.
TestData.disableImplicitSessions = true;
-if ($config === undefined) {
- // There is no workload to extend. Define a noop base workload to make the 'extendWorkload' call
- // below still work.
- $config = {
- threadCount: 1,
- iterations: 1,
- startState: "init",
- data: {},
- states: {init: function(db, collName) {}},
- transitions: {init: {init: 1}},
- setup: function(db, collName) {},
- teardown: function(db, collName) {},
- };
-}
-
-var $config = extendWorkload($config, function($config, $super) {
+export function extendWithInternalTransactionsUnsharded($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
@@ -749,4 +732,17 @@ var $config = extendWorkload($config, function($config, $super) {
};
return $config;
-});
+}
+
+const kBaseConfig = {
+ threadCount: 1,
+ iterations: 1,
+ startState: "init",
+ data: {},
+ states: {init: function(db, collName) {}},
+ transitions: {init: {init: 1}},
+ setup: function(db, collName) {},
+ teardown: function(db, collName) {},
+};
+
+export const $config = extendWorkload(kBaseConfig, extendWithInternalTransactionsUnsharded);
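
The rework of internal_transactions_unsharded.js above exposes extendWithInternalTransactionsUnsharded so that other workloads can graft the internal-transactions states onto their own base config instead of relying on the old '$config === undefined' fallback. A rough sketch of one way a consumer might compose the pieces (the override shown is illustrative, not from this patch):

    import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
    import {
        extendWithInternalTransactionsUnsharded
    } from "jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js";
    import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";

    // First layer the internal-transactions states onto the base workload, then apply
    // workload-specific overrides with a second extendWorkload pass.
    const $withTxns = extendWorkload($baseConfig, extendWithInternalTransactionsUnsharded);

    export const $config = extendWorkload($withTxns, function($config, $super) {
        $config.data.someIllustrativeFlag = true;  // hypothetical override
        return $config;
    });
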
diff --git a/jstests/concurrency/fsm_workloads/invalidated_cursors.js b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
index 3c9ffcf55d7ff..70ffc1706802c 100644
--- a/jstests/concurrency/fsm_workloads/invalidated_cursors.js
+++ b/jstests/concurrency/fsm_workloads/invalidated_cursors.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* invalidated_cursors.js
*
@@ -13,7 +11,7 @@
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+export const $config = (function() {
let data = {
chooseRandomlyFrom: function chooseRandomlyFrom(arr) {
if (!Array.isArray(arr)) {
diff --git a/jstests/concurrency/fsm_workloads/kill_aggregation.js b/jstests/concurrency/fsm_workloads/kill_aggregation.js
index 8d658d0ae5b2e..2197849b36523 100644
--- a/jstests/concurrency/fsm_workloads/kill_aggregation.js
+++ b/jstests/concurrency/fsm_workloads/kill_aggregation.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* kill_aggregation.js
*
@@ -10,10 +8,10 @@
* This workload was designed to reproduce SERVER-25039.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/kill_rooted_or.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/kill_rooted_or.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the collection name, since the workload name is assumed to be
// unique. Note that we choose our own collection name instead of using the collection provided
// by the concurrency framework, because this workload drops its collection.
diff --git a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
index 484d4febdecc2..caae498baae41 100644
--- a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
+++ b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* kill_multicollection_aggregation.js
*
@@ -12,10 +10,10 @@
* The parent test, invalidated_cursors.js, uses $currentOp.
* @tags: [uses_curop_agg_stage, state_functions_share_cursor]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/invalidated_cursors.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/invalidated_cursors.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/**
* Runs the specified aggregation pipeline and stores the resulting cursor (if the command
* is successful) in 'this.cursor'.
diff --git a/jstests/concurrency/fsm_workloads/kill_rooted_or.js b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
index 53e745128634e..f95774d0fefb0 100644
--- a/jstests/concurrency/fsm_workloads/kill_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/kill_rooted_or.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* kill_rooted_or.js
*
@@ -10,7 +8,8 @@
* This workload was designed to reproduce SERVER-24761.
*/
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+
+export const $config = (function() {
// Use the workload name as the collection name, since the workload name is assumed to be
// unique. Note that we choose our own collection name instead of using the collection provided
// by the concurrency framework, because this workload drops its collection.
diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js
index 687f871a37830..4c135a2d4f041 100644
--- a/jstests/concurrency/fsm_workloads/list_indexes.js
+++ b/jstests/concurrency/fsm_workloads/list_indexes.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* list_indexes.js
*
* Checks that the listIndexes command can tolerate concurrent modifications to the
* index catalog.
*/
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
// Picks a random index to drop and recreate.
function modifyIndices(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index 0a287f2f0054e..bad68e7eda11c 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_drop.js
*
@@ -16,7 +14,7 @@
* does_not_support_causal_consistency,
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
mapper: function mapper() {
emit(this.key, 1);
@@ -61,8 +59,8 @@ var $config = (function() {
var res = bulk.execute();
assertAlways.commandWorked(res);
} catch (ex) {
- assert.writeErrorWithCode(ex, ErrorCodes.DatabaseDropPending);
assert.eq(true, ex instanceof BulkWriteError, tojson(ex));
+ assert.writeErrorWithCode(ex, ErrorCodes.DatabaseDropPending);
}
var options = {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 96b71c191f42e..b37b0515cb540 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_inline.js
*
@@ -13,7 +11,7 @@
* does_not_support_causal_consistency
* ]
*/
-var $config = (function() {
+export const $config = (function() {
function mapper() {
if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
var obj = {};
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
index 6113d7c949149..45cdbf7c6b8de 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_interrupt.js
*
@@ -13,10 +11,12 @@
* uses_curop_agg_stage
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'map_reduce_interrupt';
$config.states.killOp = function killOp(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 05fd0d549100c..b4eaf035f7b63 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_merge.js
*
@@ -17,10 +15,10 @@
* does_not_support_causal_consistency
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as the database name,
// since the workload name is assumed to be unique.
var uniqueDBName = 'map_reduce_merge';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 20328d6da0b5a..ac85c1ca5e629 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_reduce.js
*
@@ -15,10 +13,10 @@
* does_not_support_causal_consistency,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
var prefix = 'map_reduce_reduce';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 074dad1732a9c..be9f5c9fe8ede 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_replace.js
*
@@ -15,10 +13,10 @@
* does_not_support_causal_consistency
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
var prefix = 'map_reduce_replace';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index f328b9f5358e4..e6f99540ed0b5 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_replace_nonexistent.js
*
@@ -14,10 +12,10 @@
* does_not_support_causal_consistency
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
$config.data.prefix = 'map_reduce_replace_nonexistent';
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
index 60e29554027a9..d58ba35804237 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_replace_remove.js
*
@@ -15,10 +13,10 @@
* does_not_support_causal_consistency
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_replace.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_replace.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.remove = function remove(db, collName) {
for (var i = 0; i < 20; ++i) {
var res = db[collName].remove({_id: Random.randInt(this.numDocs)}, {justOne: true});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
index 1c11384d0e9c1..fd0717a5faa64 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* map_reduce_with_chunk_migrations.js
*
@@ -17,10 +15,12 @@
* does_not_support_causal_consistency
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// The base setup will insert 'partitionSize' number of documents per thread, evenly
// distributing across the chunks. Documents will only have the "_id" field.
$config.data.partitionSize = 50;
diff --git a/jstests/concurrency/fsm_workloads/move_primary_with_crud.js b/jstests/concurrency/fsm_workloads/move_primary_with_crud.js
index a9f50d57debaa..44f9d46400ada 100644
--- a/jstests/concurrency/fsm_workloads/move_primary_with_crud.js
+++ b/jstests/concurrency/fsm_workloads/move_primary_with_crud.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Randomly performs a series of CRUD and movePrimary operations on unsharded collections, checking
* for data consistency as a consequence of these operations.
@@ -10,9 +8,9 @@
* ]
*/
-load('jstests/libs/feature_flag_util.js');
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-const $config = (function() {
+export const $config = (function() {
const kCollNamePrefix = 'unsharded_coll_';
const kInitialCollSize = 100;
const kBatchSizeForDocsLookup = kInitialCollSize * 2;
@@ -167,9 +165,7 @@ const $config = (function() {
// Due to a stepdown of the donor during the cloning phase, the movePrimary
// operation failed. It is not automatically recovered, but any orphaned data on
// the recipient has been deleted.
- 7120202,
- // Same as the above, but due to a stepdown of the recipient.
- ErrorCodes.MovePrimaryAborted
+ 7120202
]);
},
checkDatabaseMetadataConsistency: function(db, collName, connCache) {
@@ -220,9 +216,7 @@ const $config = (function() {
let setup = function(db, collName, cluster) {
this.skipMetadataChecks =
// TODO SERVER-70396: remove this flag
- !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') ||
- // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments
- cluster.hasCatalogShard();
+ !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency');
};
const standardTransition = {
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
index 81cc596e228c1..c0eac298a79fd 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js
@@ -1,12 +1,11 @@
-'use strict';
-
/**
* Runs findAndModify, update, delete, find, and getMore within a transaction.
*
* @tags: [uses_transactions, state_functions_share_transaction]
*/
load('jstests/concurrency/fsm_workload_helpers/cleanup_txns.js');
-var $config = (function() {
+
+export const $config = (function() {
function quietly(func) {
const printOriginal = print;
try {
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
index 2ecd324f04d6d..caa0055a14b40 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs update, findAndModify, delete, find, and getMore in a transaction with all threads using the
* same session.
@@ -7,11 +5,12 @@
* @tags: [uses_transactions, state_functions_share_transaction, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js'); // for
- // $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
this.lsid = tojson({id: UUID()});
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
index 4c5e8e30b8fef..70c5caa3a78d9 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* multi_statement_transaction_atomicity_isolation.js
*
@@ -52,7 +50,7 @@ load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
// For arrayEq.
load("jstests/aggregation/extras/utils.js");
-var $config = (function() {
+export const $config = (function() {
function checkTransactionCommitOrder(documents) {
const graph = new Graph();
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
index fb53631212911..cbfe20679be53 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* This test checks high level invariants of various transaction related metrics reported in
* serverStatus and currentOp.
@@ -7,13 +5,14 @@
* @tags: [uses_transactions, uses_prepare_transaction, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js";
load('jstests/concurrency/fsm_workload_helpers/check_transaction_server_status_invariants.js');
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
load('jstests/core/txns/libs/prepare_helpers.js');
-// for $config
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
this.prepareProbability = 0.5;
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
index 7946b83b2a2cd..5255727173703 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js
@@ -1,15 +1,15 @@
-'use strict';
-
/**
* Test transactions atomicity and isolation guarantees for transactions across multiple DBs.
*
* @tags: [uses_transactions, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js";
-var $config = extendWorkload($config, ($config, $super) => {
+export const $config = extendWorkload($baseConfig, ($config, $super) => {
// Number of unique collections and number of unique databases. The square root is used
// here to ensure the total number of namespaces (coll * db) is roughly equal to the
// number of threads.
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
index 72661493d5d94..135082f6f1bed 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js
@@ -1,16 +1,15 @@
-'use strict';
-
/**
* Performs repeated reads of the documents in the collection to test snapshot isolation.
*
* @tags: [uses_transactions, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
-// for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.numReads = 5;
$config.states.repeatedRead = function repeatedRead(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
index 9f4ff6a31e19e..7b5dbbbc1519a 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Verifies the transactions server status metrics on mongos while running transactions.
* Temporarily disabled for BF-24311.
@@ -8,11 +6,13 @@
* uses_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js";
load('jstests/concurrency/fsm_workload_helpers/check_transaction_server_status_invariants.js');
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.teardown = function(db, collName, cluster) {
// Check the server-wide invariants one last time with only a single sample, since all user
// operations should have finished.
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js
index c8e69017336be..e3eb5e3e897bd 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs update, findAndModify, delete, find, and getMore in a transaction with all threads using the
* same session.
@@ -13,11 +11,12 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js'); // for
- // $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.verifyMongosSessionsWithTxns = function verifyMongosSessionsWithTxns(sessions) {
const acceptableReadConcernLevels = ['snapshot', 'local'];
sessions.forEach((session) => {
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js
index a186164622009..b1797c3723cb1 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js
@@ -1,16 +1,16 @@
-'use strict';
-
/**
* Tests periodically killing sessions that are running transactions.
*
* @tags: [uses_transactions, assumes_snapshot_transactions, kills_random_sessions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js";
load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js');
-var $config = extendWorkload($config, ($config, $super) => {
+export const $config = extendWorkload($baseConfig, ($config, $super) => {
$config.data.retryOnKilledSession = true;
$config.states.killSession = killSession;
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
index ef3178f9dc998..00421f70edb51 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Creates several bank accounts. On each iteration, each thread:
 *  - chooses two accounts and an amount of money to transfer
@@ -11,7 +9,7 @@
// For withTxnAndAutoRetry.
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
-var $config = (function() {
+export const $config = (function() {
function computeTotalOfAllBalances(documents) {
return documents.reduce((total, account) => total + account.balance, 0);
}
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js
index 94b595825cc74..453ad7b680717 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests periodically killing sessions that are running transactions. The base workload runs
* transactions with two writes, which will require two phase commit in a sharded cluster if each
@@ -8,11 +6,13 @@
* @tags: [uses_transactions, assumes_snapshot_transactions, kills_random_sessions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js";
load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config
-var $config = extendWorkload($config, ($config, $super) => {
+export const $config = extendWorkload($baseConfig, ($config, $super) => {
$config.data.retryOnKilledSession = true;
$config.states.killSession = killSession;
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
index ed01e10ad88c6..542cb491fbc80 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs concurrent majority writes alongside transactions to verify both will eventually
* complete as expected.
@@ -8,11 +6,13 @@
* @tags: [uses_transactions, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js";
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.majorityWriteCollName = 'majority_writes';
$config.data.counter = 0;
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
index 93d89bbce0f20..25ede737e2855 100644
--- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
+++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js
@@ -1,15 +1,15 @@
-'use strict';
-
/**
* Performs repeated reads of the documents in the collection to test snapshot isolation.
*
* @tags: [uses_transactions, assumes_snapshot_transactions]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.numReads = 5;
$config.states.repeatedRead = function repeatedRead(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index acca1470b3f02..61547bf708f13 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* plan_cache_drop_database.js
*
@@ -8,7 +6,7 @@
* events triggers the concurrent destruction of a Collection object and
* the updating of said object's PlanCache (SERVER-17117).
*/
-var $config = (function() {
+export const $config = (function() {
function populateData(db, collName) {
var coll = db[collName];
diff --git a/jstests/concurrency/fsm_workloads/profile_command.js b/jstests/concurrency/fsm_workloads/profile_command.js
index dd85296aa8d40..1374c00031c50 100644
--- a/jstests/concurrency/fsm_workloads/profile_command.js
+++ b/jstests/concurrency/fsm_workloads/profile_command.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* profile_command.js
*
@@ -13,7 +11,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
const data = {
numDocs: 1000,
checkProfileResult: function(result) {
diff --git a/jstests/concurrency/fsm_workloads/query_stats_concurrent.js b/jstests/concurrency/fsm_workloads/query_stats_concurrent.js
new file mode 100644
index 0000000000000..a44b269c35f03
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/query_stats_concurrent.js
@@ -0,0 +1,157 @@
+/**
+ * query_stats_concurrent.js
+ *
+ * Stresses $queryStats running concurrently with queries.
+ *
+ * @tags: [
+ * featureFlagQueryStats,
+ * does_not_support_causal_consistency,
+ * ]
+ *
+ */
+export const $config = (function() {
+ var states = (function() {
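+        // 'init' and 'reInit' are intentionally empty; they only serve as hub states for the
+        // weighted transitions defined at the bottom of this workload.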
+ function init(db, collName) {
+ }
+
+ function reInit(db, collName) {
+ }
+
+ // Runs one find query so that the queryStatsEntry is updated.
+ function findOneShape(db, collName) {
+ assertWhenOwnColl.gt(db[collName].find({i: {$lt: 50}}).itcount(), 0);
+ }
+
+ // Runs one agg query so that the queryStatsEntry is updated.
+ function aggOneShape(db, collName) {
+ assertWhenOwnColl.gt(db[collName].aggregate([{$match: {i: {$gt: 900}}}]).itcount(), 0);
+ }
+
+ // Runs many queries with different shapes to ensure eviction occurs in the queryStats
+ // store.
+ function multipleShapes(db, collName) {
+ for (var i = 0; i < 10000; i++) {
+ let query = {};
+ query["foo" + i] = "bar";
+ db[collName].aggregate([{$match: query}]).itcount();
+ }
+ const evictedAfter = db.serverStatus().metrics.queryStats.numEvicted;
+ assertAlways.gt(evictedAfter, 0);
+ }
+
+ // Runs queryStats with transformation.
+ function runQueryStatsWithHmac(db, collName) {
+ let response = db.adminCommand({
+ aggregate: 1,
+ pipeline: [{
+ $queryStats: {
+ transformIdentifiers: {
+ algorithm: "hmac-sha-256",
+ hmacKey: BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE=")
+ }
+ }
+ }],
+ // Use a small batch size to ensure these operations open up a cursor and use
+ // multiple getMores.
+ cursor: {batchSize: 1}
+ });
+ assertAlways.commandWorked(response);
+ const cursor = new DBCommandCursor(db.getSiblingDB("admin"), response);
+ assertAlways.gt(cursor.itcount(), 0);
+ }
+
+ // Runs queryStats without transformation.
+ function runQueryStatsWithoutHmac(db, collName) {
+ let response = db.adminCommand({
+ aggregate: 1,
+ pipeline: [{$queryStats: {}}],
+ // Use a small batch size to ensure these operations open up a cursor and use
+ // multiple getMores.
+ cursor: {batchSize: 1}
+ });
+ assertAlways.commandWorked(response);
+ const cursor = new DBCommandCursor(db.getSiblingDB("admin"), response);
+ assertAlways.gt(cursor.itcount(), 0);
+ }
+
+ return {
+ init: init,
+ reInit: reInit,
+ findOneShape: findOneShape,
+ multipleShapes: multipleShapes,
+ aggOneShape: aggOneShape,
+ runQueryStatsWithHmac: runQueryStatsWithHmac,
+ runQueryStatsWithoutHmac: runQueryStatsWithoutHmac
+ };
+ })();
+
+ var internalQueryStatsRateLimit;
+ var internalQueryStatsCacheSize;
+
+ let setup = function(db, collName, cluster) {
+ const setQueryStatsParams = (db) => {
+ var res;
+ res = db.adminCommand({setParameter: 1, internalQueryStatsRateLimit: -1});
+ assertAlways.commandWorked(res);
+ internalQueryStatsRateLimit = res.was;
+
+ res = db.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "1MB"});
+ assertAlways.commandWorked(res);
+ internalQueryStatsCacheSize = res.was;
+ };
+
+ cluster.executeOnMongodNodes(setQueryStatsParams);
+ cluster.executeOnMongosNodes(setQueryStatsParams);
+
+ assert.commandWorked(db[collName].createIndex({i: 1}));
+ const bulk = db[collName].initializeUnorderedBulkOp();
+ for (let i = 0; i < 1000; ++i) {
+ bulk.insert({i: i});
+ }
+ assert.commandWorked(bulk.execute());
+ };
+
+ let teardown = function(db, collName, cluster) {
+ const resetQueryStatsParams = (db) => assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryStatsRateLimit: internalQueryStatsRateLimit,
+ internalQueryStatsCacheSize: internalQueryStatsCacheSize
+ }));
+
+ cluster.executeOnMongodNodes(resetQueryStatsParams);
+ cluster.executeOnMongosNodes(resetQueryStatsParams);
+
+ db[collName].drop();
+ };
+
+ let transitions = {
+ // To start, add some $queryStats data so that it is never empty.
+ init: {
+ aggOneShape: 0.33,
+ findOneShape: 0.33,
+ multipleShapes: 0.34,
+ },
+ // From then on, choose evenly among all possibilities:
+ reInit: {
+ aggOneShape: 0.2,
+ findOneShape: 0.2,
+ multipleShapes: 0.2,
+ runQueryStatsWithHmac: 0.2,
+ runQueryStatsWithoutHmac: 0.2
+ },
+ findOneShape: {reInit: 1},
+ multipleShapes: {reInit: 1},
+ runQueryStatsWithHmac: {reInit: 1},
+ runQueryStatsWithoutHmac: {reInit: 1},
+ aggOneShape: {reInit: 1}
+ };
+
+ return {
+ threadCount: 10,
+ iterations: 10,
+ states: states,
+ setup: setup,
+ teardown: teardown,
+ transitions: transitions
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js
index d0f2df581821c..d2a15b4483b03 100644
--- a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs a series of CRUD operations while DDL commands are running in the background
* and verifies guarantees are not broken.
@@ -19,9 +17,9 @@
load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js");
load("jstests/libs/uuid_util.js");
-load('jstests/libs/feature_flag_util.js');
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-var $config = (function() {
+export const $config = (function() {
function threadCollectionName(prefix, tid) {
return prefix + tid;
}
@@ -78,13 +76,39 @@ var $config = (function() {
tid = Random.randInt(this.threadCount);
const targetThreadColl = threadCollectionName(collName, tid);
- const coll = db[threadCollectionName(collName, tid)];
+ const coll = db[targetThreadColl];
const fullNs = coll.getFullName();
jsTestLog('create state tid:' + tid + ' currentTid:' + this.tid +
' collection:' + targetThreadColl);
- assertAlways.commandWorked(
- db.adminCommand({shardCollection: fullNs, key: {_id: 1}, unique: false}));
- jsTestLog('create state finished');
+ // Add necessary indexes for resharding.
+ assertAlways.commandWorked(db.adminCommand({
+ createIndexes: targetThreadColl,
+ indexes: [
+ {key: {[`tid_${tid}_0`]: 1}, name: `tid_${tid}_0_1`, unique: false},
+ {key: {[`tid_${tid}_1`]: 1}, name: `tid_${tid}_1_1`, unique: false}
+ ],
+ writeConcern: {w: 'majority'}
+ }));
+ try {
+ assertAlways.commandWorked(db.adminCommand(
+ {shardCollection: fullNs, key: {[`tid_${tid}_0`]: 1}, unique: false}));
+ } catch (e) {
+ const exceptionCode = e.code;
+ if (exceptionCode) {
+ if (exceptionCode == ErrorCodes.AlreadyInitialized ||
+ exceptionCode == ErrorCodes.InvalidOptions) {
+                    // It is fine for a shardCollection to throw AlreadyInitialized, since a
+                    // resharding state might have changed the shard key for the namespace. It
+                    // is also fine to fail with InvalidOptions, since a drop state could've
+                    // removed the indexes and the CRUD state might have added some documents,
+                    // forcing the need to manually create indexes.
+ return;
+ }
+ }
+ throw e;
+ } finally {
+ jsTestLog('create state finished');
+ }
},
drop: function(db, collName, connCache) {
let tid = this.tid;
@@ -155,6 +179,34 @@ var $config = (function() {
jsTestLog('rename state finished');
}
},
+ resharding: function(db, collName, connCache) {
+ let tid = this.tid;
+ // Pick a tid at random until we pick one that doesn't target this thread's collection.
+ while (tid === this.tid)
+ tid = Random.randInt(this.threadCount);
+ const fullNs = db[threadCollectionName(collName, tid)].getFullName();
+ let newKey = 'tid_' + tid + '_' + Random.randInt(2);
+ try {
+ jsTestLog('resharding state tid:' + tid + ' currentTid:' + this.tid +
+ ' collection:' + fullNs + ' newKey ' + newKey);
+ assertAlways.commandWorked(
+ db.adminCommand({reshardCollection: fullNs, key: {[`${newKey}`]: 1}}));
+ } catch (e) {
+ const exceptionCode = e.code;
+ if (exceptionCode == ErrorCodes.ConflictingOperationInProgress ||
+ exceptionCode == ErrorCodes.ReshardCollectionInProgress ||
+ exceptionCode == ErrorCodes.NamespaceNotSharded) {
+ // It is fine for a resharding operation to throw ConflictingOperationInProgress
+ // if a concurrent resharding with the same collection is ongoing.
+ // It is also fine for a resharding operation to throw NamespaceNotSharded,
+                    // because a drop state could've happened recently.
+ return;
+ }
+ throw e;
+ } finally {
+ jsTestLog('resharding state finished');
+ }
+ },
checkDatabaseMetadataConsistency: function(db, collName, connCache) {
if (this.skipMetadataChecks) {
return;
@@ -188,7 +240,6 @@ var $config = (function() {
jsTestLog('CRUD state tid:' + tid + ' currentTid:' + this.tid +
' collection:' + targetThreadColl);
const coll = db[targetThreadColl];
- const fullNs = coll.getFullName();
const generation = new Date().getTime();
// Insert Data
@@ -196,7 +247,8 @@ var $config = (function() {
let insertBulkOp = coll.initializeUnorderedBulkOp();
for (let i = 0; i < numDocs; ++i) {
- insertBulkOp.insert({generation: generation, count: i, tid: tid});
+ insertBulkOp.insert(
+ {generation: generation, count: i, [`tid_${tid}_0`]: i, [`tid_${tid}_1`]: i});
}
mutexLock(db, tid, targetThreadColl);
@@ -247,9 +299,7 @@ var $config = (function() {
let setup = function(db, collName, cluster) {
this.skipMetadataChecks =
// TODO SERVER-70396: remove this flag
- !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') ||
- // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments
- cluster.hasCatalogShard();
+ !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency');
for (let tid = 0; tid < this.threadCount; ++tid) {
db[data.CRUDMutex].insert({tid: tid, mutex: 0});
diff --git a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js
index 3cd6110c939f5..0fc9f11645a5d 100644
--- a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Concurrently performs CRUD operations, DDL commands and FCV changes and verifies guarantees are
* not broken.
@@ -19,10 +17,12 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.setFCV = function(db, collName, connCache) {
const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV];
const targetFCV = fcvValues[Random.randInt(3)];
diff --git a/jstests/concurrency/fsm_workloads/random_DDL_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_operations.js
index 6749900610b83..47f9ca6d7af0e 100644
--- a/jstests/concurrency/fsm_workloads/random_DDL_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_DDL_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Concurrently performs DDL commands and verifies guarantees are not broken.
*
@@ -11,7 +9,7 @@
*/
load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js");
-load('jstests/libs/feature_flag_util.js');
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const dbPrefix = jsTestName() + '_DB_';
const dbCount = 2;
@@ -31,7 +29,7 @@ function getRandomShard(connCache) {
return shards[Random.randInt(shards.length)];
}
-var $config = (function() {
+export const $config = (function() {
let states = {
create: function(db, collName, connCache) {
db = getRandomDb(db);
@@ -65,10 +63,6 @@ var $config = (function() {
]);
},
movePrimary: function(db, collName, connCache) {
- if (this.skipMovePrimary) {
- return;
- }
-
db = getRandomDb(db);
const shardId = getRandomShard(connCache);
@@ -77,10 +71,7 @@ var $config = (function() {
db.adminCommand({movePrimary: db.getName(), to: shardId}), [
ErrorCodes.ConflictingOperationInProgress,
// The cloning phase has failed (e.g. as a result of a stepdown). When a failure
- // occurs at this phase, the movePrimary operation does not recover. Either of
- // the following error codes could be seen depending on if the failover was on
- // the donor or recipient node.
- ErrorCodes.MovePrimaryAborted,
+ // occurs at this phase, the movePrimary operation does not recover.
7120202
]);
},
@@ -116,14 +107,9 @@ var $config = (function() {
};
let setup = function(db, collName, cluster) {
- // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. Prevent non-resilient movePrimary
- // operations from being executed in multiversion suites.
- this.skipMovePrimary = !FeatureFlagUtil.isEnabled(db.getMongo(), 'ResilientMovePrimary');
this.skipMetadataChecks =
// TODO SERVER-70396: remove this flag
- !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') ||
- // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments
- cluster.hasCatalogShard();
+ !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency');
for (var i = 0; i < dbCount; i++) {
const dbName = dbPrefix + i;
diff --git a/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js
new file mode 100644
index 0000000000000..ab9ca9eaf925b
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js
@@ -0,0 +1,211 @@
+/**
+ * Performs a series of placement-changing commands (DDLs and chunk migrations) while
+ * resetPlacementHistory may be run in parallel. After tearing down the test, the
+ * check_routing_table_consistency hook will verify that the content of config.placementHistory is
+ * still consistent with the rest of the catalog.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_sharding,
+ * assumes_balancer_off,
+ * does_not_support_causal_consistency,
+ * does_not_support_add_remove_shards,
+ * # The mechanism to pick a random collection is not resilient to stepdowns
+ * does_not_support_stepdowns,
+ * does_not_support_transactions,
+ * ]
+ */
+
+load("jstests/libs/uuid_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+load('jstests/concurrency/fsm_workload_helpers/chunks.js');
+
+export const $config = (function() {
+ const testCollectionsState = 'testCollectionsState';
+ const resetPlacementHistoryState = 'resetPlacementHistoryState';
+ const resetPlacementHistoryStateId = 'x';
+ const numThreads = 12;
+ const numTestCollections = numThreads + 5;
+
+ function getConfig(db) {
+ return db.getSiblingDB('config');
+ }
+
+ /**
+     * Used to guarantee that a namespace isn't targeted by multiple FSM threads at the same time.
+ */
+ function acquireCollectionName(db, mustBeAlreadyCreated = true) {
+ let acquiredCollDoc = null;
+ assertAlways.soon(function() {
+ const query = {acquired: false};
+ if (mustBeAlreadyCreated) {
+ query.created = true;
+ }
+ acquiredCollDoc = db[testCollectionsState].findAndModify({
+ query: query,
+ sort: {lastAcquired: 1},
+ update: {$set: {acquired: true, lastAcquired: new Date()}}
+ });
+ return acquiredCollDoc !== null;
+ });
+ return acquiredCollDoc.collName;
+ }
+
+ function releaseCollectionName(db, collName, wasDropped = false) {
+        // If the collection was dropped, leave a chance of reusing the same name during the next
+        // shardCollection.
+ const newExtension = wasDropped && Math.random() < 0.5 ? 'e' : '';
+ const match = db[testCollectionsState].findAndModify({
+ query: {collName: collName, acquired: true},
+ update:
+ {$set: {collName: collName + newExtension, acquired: false, created: !wasDropped}}
+ });
+ assertAlways(match !== null);
+ }
+
+ let states = {
+ shardCollection: function(db, _, connCache) {
+ // To avoid starvation problems during the execution of the FSM, it is OK to pick
+ // up an already sharded collection.
+ const collName = acquireCollectionName(db, false /*mustBeAlreadyCreated*/);
+ try {
+ jsTestLog(`Beginning shardCollection state for ${collName}`);
+ assertAlways.commandWorked(
+ db.adminCommand({shardCollection: db[collName].getFullName(), key: {_id: 1}}));
+ jsTestLog(`shardCollection state for ${collName} completed`);
+ } catch (e) {
+ throw e;
+ } finally {
+ releaseCollectionName(db, collName);
+ }
+ },
+
+ dropCollection: function(db, _, connCache) {
+ // To avoid starvation problems during the execution of the FSM, it is OK to pick
+ // up an already dropped collection.
+ const collName = acquireCollectionName(db, false /*mustBeAlreadyCreated*/);
+ try {
+ jsTestLog(`Beginning dropCollection state for ${collName}`);
+                // Avoid checking the outcome, as the drop may result in a no-op.
+ db[collName].drop();
+ jsTestLog(`dropCollection state for ${collName} completed`);
+ } catch (e) {
+ throw e;
+ } finally {
+ releaseCollectionName(db, collName, true /*wasDropped*/);
+ }
+ },
+
+ renameCollection: function(db, _, connCache) {
+ const collName = acquireCollectionName(db);
+ const renamedCollName = collName + '_renamed';
+ try {
+ jsTestLog(`Beginning renameCollection state for ${collName}`);
+ assertAlways.commandWorked(db[collName].renameCollection(renamedCollName));
+ // reverse the rename before leaving the state.
+ assertAlways.commandWorked(db[renamedCollName].renameCollection(collName));
+ jsTestLog(`renameCollection state for ${collName} completed`);
+ } catch (e) {
+ throw e;
+ } finally {
+ releaseCollectionName(db, collName);
+ }
+ },
+
+ moveChunk: function(db, _, connCache) {
+ const collName = acquireCollectionName(db);
+ try {
+ jsTestLog(`Beginning moveChunk state for ${collName}`);
+ const collUUID =
+ getConfig(db).collections.findOne({_id: db[collName].getFullName()}).uuid;
+ assertAlways(collUUID);
+ const shards = getConfig(db).shards.find().toArray();
+ const chunkToMove = getConfig(db).chunks.findOne({uuid: collUUID});
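+                // Pick a random destination shard other than the chunk's current owner.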
+ const destination = shards.filter(
+ s => s._id !==
+ chunkToMove.shard)[Math.floor(Math.random() * (shards.length - 1))];
+ ChunkHelper.moveChunk(
+ db, collName, [chunkToMove.min, chunkToMove.max], destination._id, true);
+ jsTestLog(`moveChunk state for ${collName} completed`);
+ } catch (e) {
+ throw e;
+ } finally {
+ releaseCollectionName(db, collName);
+ }
+ },
+
+ resetPlacementHistory: function(db, collName, connCache) {
+ jsTestLog(`Beginning resetPlacementHistory state`);
+ assertAlways.commandWorked(db.adminCommand({resetPlacementHistory: 1}));
+ jsTestLog(`resetPlacementHistory state completed`);
+ },
+
+ };
+
+ let transitions = {
+ shardCollection: {
+ shardCollection: 0.22,
+ dropCollection: 0.22,
+ renameCollection: 0.22,
+ moveChunk: 0.22,
+ resetPlacementHistory: 0.12
+ },
+ dropCollection: {
+ shardCollection: 0.22,
+ dropCollection: 0.22,
+ renameCollection: 0.22,
+ moveChunk: 0.22,
+ resetPlacementHistory: 0.12
+ },
+ renameCollection: {
+ shardCollection: 0.22,
+ dropCollection: 0.22,
+ renameCollection: 0.22,
+ moveChunk: 0.22,
+ resetPlacementHistory: 0.12
+ },
+ moveChunk: {
+ shardCollection: 0.22,
+ dropCollection: 0.22,
+ renameCollection: 0.22,
+ moveChunk: 0.22,
+ resetPlacementHistory: 0.12
+ },
+ resetPlacementHistory: {
+ shardCollection: 0.22,
+ dropCollection: 0.22,
+ renameCollection: 0.22,
+ moveChunk: 0.22,
+ },
+ };
+
+ let setup = function(db, _, cluster) {
+ this.skipMetadataChecks =
+ // TODO SERVER-70396: remove this flag
+ !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency');
+
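+        // Pre-populate the pool of collection names that the FSM threads acquire and release.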
+ for (let i = 0; i < numTestCollections; ++i) {
+ db[testCollectionsState].insert({
+ collName: `testColl_${i}`,
+ acquired: false,
+ lastAcquired: new Date(),
+ created: false
+ });
+ }
+
+ db[resetPlacementHistoryState].insert({_id: resetPlacementHistoryStateId, ongoing: false});
+ };
+
+ let teardown = function(db, collName, cluster) {};
+
+ return {
+ threadCount: numThreads,
+ iterations: 32,
+ startState: 'shardCollection',
+ states: states,
+ transitions: transitions,
+ setup: setup,
+ teardown: teardown,
+ passConnectionCache: true
+ };
+})();
diff --git a/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js
index b6740c8cf89cc..87ee5c7297ed7 100644
--- a/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Concurrently performs DDL commands and FCV changes and verifies guarantees are
* not broken.
@@ -13,10 +11,10 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_DDL_operations.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_DDL_operations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.setFCV = function(db, collName, connCache) {
const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV];
const targetFCV = fcvValues[Random.randInt(3)];
diff --git a/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js
index 7e08e5b4e1d60..429bff0ca21f0 100644
--- a/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs updates that will change a document's shard key across chunks while simultaneously
* changing the FCV.
@@ -13,14 +11,16 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js";
// Transactions that run concurrently with a setFCV may get interrupted because setFCV issues a
// killSession for any open sessions during an FCV change. We want to have retryability support for
// such scenarios.
load('jstests/libs/override_methods/retry_on_killed_session.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Sessions of open transactions can be killed and throw "Interrupted" if they run concurrently
// with a setFCV command, so we want to be able to catch those as acceptable killSession errors.
$config.data.retryOnKilledSession = true;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
index 38644ab1d6333..927f9ae1c0244 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Shards a collection by 'skey' and creates one chunk per thread, filling each chunk with
* documents, and assigning each document to a random thread. Meant to be extended by workloads that
@@ -10,10 +8,12 @@
* assumes_balancer_off,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 1;
$config.iterations = 1;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
index bc60f549c7298..293f6b4f0b3d7 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs deletes in transactions without the shard key while chunks are being moved. This
* includes multi=true deletes and multi=false deletes with exact _id queries.
@@ -11,11 +9,11 @@
* uses_transactions,
* ];
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
load('jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
@@ -37,7 +35,7 @@ var $config = extendWorkload($config, function($config, $super) {
exactIdDelete(db, collName, this.session);
};
$config.states.multiDelete = function(db, collName, connCache) {
- multiDelete(db, collName, this.session, this.tid);
+ multiDelete(db, collName, this.session, this.tid, this.partitionSize);
};
$config.states.verifyDocuments = function(db, collName, connCache) {
verifyDocuments(db, collName, this.tid);
@@ -50,7 +48,7 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.init = function init(db, collName, connCache) {
$super.states.init.apply(this, arguments);
this.session = db.getMongo().startSession({causalConsistency: false});
- initDeleteInTransactionStates(db, collName, this.tid);
+ initDeleteInTransactionStates(db, collName, this.tid, this.partitionSize);
};
$config.transitions = {
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
index 18557281a315a..e059cb07ed796 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs updates in transactions without the shard key while chunks are being moved. This
* includes multi=true updates and multi=false updates with exact _id queries.
@@ -11,11 +9,11 @@
* uses_transactions,
* ];
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
load('jstests/concurrency/fsm_workload_helpers/update_in_transaction_states.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
index e5db4d61d5362..7989de521c1e7 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs a series of index operations while chunk migrations are running in the background
* and verifies that indexes are not left in an inconsistent state.
@@ -13,7 +11,7 @@
load("jstests/concurrency/fsm_workload_helpers/chunks.js"); // for chunk helpers
load("jstests/sharding/libs/sharded_index_util.js"); // for findInconsistentIndexesAcrossShards
-var $config = (function() {
+export const $config = (function() {
function threadCollectionName(prefix, tid) {
return prefix + tid;
}
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
index cc72d684522ab..16fbc727b08d2 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs a series of {multi: true} updates/deletes while moving chunks, and checks that the
 * expected change stream events are received and that no events are generated for writes on orphan
@@ -11,10 +9,10 @@
* uses_change_streams
* ];
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js
index c9eb00ac7c761..523fb4fde048e 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform continuous moveChunk on multiple collections/databases.
*
@@ -9,8 +7,8 @@
* does_not_support_add_remove_shards,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
const dbNames = ['db0', 'db1', 'db2'];
const collNames = ['collA', 'collB', 'collC'];
@@ -44,7 +42,7 @@ const runWithManualRetriesIfInStepdownSuite = (fn) => {
}
};
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = dbNames.length * collNames.length;
$config.iterations = 64;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js
index c397518ecc53d..f8b93ae941c41 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs updates in transactions without the shard key while chunks are being moved. This
* includes multi=true updates and multi=false updates with exact _id queries.
@@ -11,10 +9,10 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js
index 85eac01c61871..49a5fb438dff6 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs these actions in parallel:
* 1. Refine a collection's shard key.
@@ -19,18 +17,24 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js";
load('jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.iterations = 10;
$config.states.exactIdDelete = function(db, collName, connCache) {
exactIdDelete(db, this.getCurrentOrPreviousLatchCollName(collName), this.session);
};
$config.states.multiDelete = function(db, collName, connCache) {
- multiDelete(db, this.getCurrentOrPreviousLatchCollName(collName), this.session, this.tid);
+ multiDelete(db,
+ this.getCurrentOrPreviousLatchCollName(collName),
+ this.session,
+ this.tid,
+ this.partitionSize);
};
$config.states.verifyDocuments = function(db, collName, connCache) {
verifyDocuments(db, this.getCurrentOrPreviousLatchCollName(collName), this.tid);
@@ -48,7 +52,7 @@ var $config = extendWorkload($config, function($config, $super) {
for (let i = this.latchCount; i >= 0; --i) {
const latchCollName = collName + '_' + i;
- initDeleteInTransactionStates(db, latchCollName, this.tid);
+ initDeleteInTransactionStates(db, latchCollName, this.tid, this.partitionSize);
}
};
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js
index 984b713d36a25..db31f2f9e9eb8 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs these actions in parallel:
* 1. Refine a collection's shard key.
@@ -19,11 +17,13 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js";
load('jstests/concurrency/fsm_workload_helpers/update_in_transaction_states.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 10;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js
index 9e5df7168f737..22b7c2b5f6a45 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Same as the base workload, but refines to a nested shard key.
*
@@ -15,11 +13,13 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load(
- 'jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from
+ "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.newShardKey = {a: 1, "b.c": 1};
$config.data.newShardKeyFields = ["a", "b.c"];
return $config;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js
index b796aee38e140..8b12af42b2c1b 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Same as the base workload, but refines to a nested shard key.
*
@@ -15,11 +13,13 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load(
- 'jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from
+ "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.newShardKey = {a: 1, "b.c": 1};
$config.data.newShardKeyFields = ["a", "b.c"];
return $config;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
index 3dd6588ef6838..08f748980dcf3 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js
@@ -9,13 +9,13 @@
* requires_fcv_51,
* ]
*/
-'use strict';
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-load('jstests/core/timeseries/libs/timeseries.js'); // For 'TimeseriesTest' helpers.
-// Load parent workload for extending below.
-load('jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js');
-
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.generateMetaFieldValueForInitialInserts = () => {
let meta = {};
// Insert a document with a field for every thread to test concurrent deletes of the
@@ -38,30 +38,37 @@ var $config = extendWorkload($config, function($config, $super) {
$config.states.init = function init(db, collName, connCache) {
$super.states.init.call(this, db, collName, connCache);
- this.featureFlagDisabled = this.featureFlagDisabled ||
- !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db);
- if (this.featureFlagDisabled) {
- jsTestLog(
- "Skipping executing this test as the requisite feature flags are not enabled.");
- }
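+        // Record whether arbitrary (measurement-field) deletes are supported so that the
+        // doDelete state can exercise that path in addition to meta-only deletes.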
+ this.arbitraryDeletesEnabled =
+ FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesDeletesSupport");
};
$config.states.doDelete = function doDelete(db, collName, connCache) {
- if (this.featureFlagDisabled) {
- return;
- }
-
+ // Alternate between filtering on the meta field and filtering on a data field. This will
+ // cover both the timeseries batch delete and arbitrary delete paths.
+ const filterFieldName = !this.arbitraryDeletesEnabled || Random.randInt(2) == 0
+ ? "m.tid" + this.tid
+ : "f.tid" + this.tid;
const filter = {
- m: {
- ["tid" + this.tid]: {
- $gte: Random.randInt($config.data.numMetaCount),
- },
+ [filterFieldName]: {
+ $gte: Random.randInt($config.data.numMetaCount),
},
};
assertAlways.commandWorked(db[collName].deleteMany(filter));
assertAlways.commandWorked(db[this.nonShardCollName].deleteMany(filter));
};
+ $config.data.validateCollection = function validate(db, collName) {
+        // Since we can't use a 'snapshot' read concern for timeseries deletes, deletes on the
+        // sharded collection may not see exactly the same records as deletes on the non-sharded
+        // collection, so the validation needs to be more lenient.
+ const count = db[collName].find().itcount();
+ const countNonSharded = db[this.nonShardCollName].find().itcount();
+ assertAlways.gte(
+ count,
+ countNonSharded,
+ "Expected sharded collection to have the same or more records than unsharded");
+ };
+
$config.transitions = {
init: {insert: 1},
insert: {insert: 3, doDelete: 3, moveChunk: 1},
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
index 4f95d508df058..e43b3c8b53427 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js
@@ -11,14 +11,14 @@
* requires_fcv_51,
* ]
*/
-'use strict';
-
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from 'jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js';
load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
-load("jstests/libs/analyze_plan.js"); // for 'getPlanStages'
-load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js');
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.nonShardCollName = "unsharded";
// A random non-round start value was chosen so that we can verify the rounding behavior that
@@ -32,12 +32,10 @@ var $config = extendWorkload($config, function($config, $super) {
$config.data.numInitialDocs = 60 * 24 * 30;
$config.data.numMetaCount = 30;
- $config.data.featureFlagDisabled = true;
-
$config.data.bucketPrefix = "system.buckets.";
- $config.data.timeField = 't';
$config.data.metaField = 'm';
+ $config.data.timeField = 't';
$config.data.generateMetaFieldValueForInitialInserts = () => {
return Math.floor(Random.rand() * $config.data.numMetaCount);
@@ -52,18 +50,16 @@ var $config = extendWorkload($config, function($config, $super) {
$config.startState = "init";
$config.states.insert = function insert(db, collName, connCache) {
- if (this.featureFlagDisabled) {
- return;
- }
-
for (let i = 0; i < 10; i++) {
// Generate a random timestamp between 'startTime' and largest timestamp we inserted.
const timer =
this.startTime + Math.floor(Random.rand() * this.numInitialDocs * this.increment);
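+            // Store the generated value both as the meta field and as a plain measurement
+            // field ('f') so that extending workloads can filter on either one.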
+ const metaVal = this.generateMetaFieldValueForInsertStage(this.tid);
const doc = {
_id: new ObjectId(),
+ [this.metaField]: metaVal,
[this.timeField]: new Date(timer),
- [this.metaField]: this.generateMetaFieldValueForInsertStage(this.tid),
+ f: metaVal,
};
assertAlways.commandWorked(db[collName].insert(doc));
assertAlways.commandWorked(db[this.nonShardCollName].insert(doc));
@@ -74,10 +70,6 @@ var $config = extendWorkload($config, function($config, $super) {
* Moves a random chunk in the target collection.
*/
$config.states.moveChunk = function moveChunk(db, collName, connCache) {
- if (this.featureFlagDisabled) {
- return;
- }
-
const configDB = db.getSiblingDB('config');
const ns = db[this.bucketPrefix + collName].getFullName();
const chunks = findChunksUtil.findChunksByNs(configDB, ns).toArray();
@@ -100,11 +92,7 @@ var $config = extendWorkload($config, function($config, $super) {
waitForDelete);
};
- $config.states.init = function init(db, collName, connCache) {
- if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) {
- this.featureFlagDisabled = false;
- }
- };
+ $config.states.init = function init(db, collName, connCache) {};
$config.transitions = {
init: {insert: 1},
@@ -112,31 +100,33 @@ var $config = extendWorkload($config, function($config, $super) {
moveChunk: {insert: 1, moveChunk: 0}
};
- $config.teardown = function teardown(db, collName, cluster) {
- if (this.featureFlagDisabled) {
- return;
- }
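+    // Check that the sharded and unsharded collections contain the same documents; extending
+    // workloads override this when an exact match cannot be guaranteed.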
+ $config.data.validateCollection = function validate(db, collName) {
+ const pipeline =
+ [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}];
+ const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline),
+ db[this.nonShardCollName].aggregate(pipeline));
+ assertAlways.eq(
+ diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []});
+ };
+ $config.teardown = function teardown(db, collName, cluster) {
const numBuckets = db[this.bucketPrefix + collName].find({}).itcount();
const numInitialDocs = db[collName].find().itcount();
jsTestLog("NumBuckets " + numBuckets + ", numDocs on sharded cluster" +
db[collName].find().itcount() + "numDocs on unsharded collection " +
db[this.nonShardCollName].find({}).itcount());
- const pipeline =
- [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}];
- const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline),
- db[this.nonShardCollName].aggregate(pipeline));
- assertAlways.eq(
- diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []});
+
+ // Validate the contents of the collection.
+ this.validateCollection(db, collName);
// Make sure that queries using various indexes on time-series buckets collection return
// buckets with all documents.
const verifyBucketIndex = (bucketIndex) => {
const unpackStage = {
"$_internalUnpackBucket": {
- "timeField": this.timeField,
"metaField": this.metaField,
+ "timeField": this.timeField,
"bucketMaxSpanSeconds": NumberInt(3600)
}
};
@@ -157,17 +147,11 @@ var $config = extendWorkload($config, function($config, $super) {
};
$config.setup = function setup(db, collName, cluster) {
- if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) {
- this.featureFlagDisabled = false;
- } else {
- return;
- }
-
db[collName].drop();
db[this.nonShardCollName].drop();
assertAlways.commandWorked(db.createCollection(
- collName, {timeseries: {timeField: this.timeField, metaField: this.metaField}}));
+ collName, {timeseries: {metaField: this.metaField, timeField: this.timeField}}));
cluster.shardCollection(db[collName], {t: 1}, false);
// Create indexes to verify index integrity during the teardown state.
@@ -185,10 +169,12 @@ var $config = extendWorkload($config, function($config, $super) {
for (let i = 0; i < this.numInitialDocs; ++i) {
currentTimeStamp += this.increment;
+ const metaVal = this.generateMetaFieldValueForInitialInserts(i);
const doc = {
_id: new ObjectId(),
+ [this.metaField]: metaVal,
[this.timeField]: new Date(currentTimeStamp),
- [this.metaField]: this.generateMetaFieldValueForInitialInserts(i),
+ f: metaVal,
};
bulk.insert(doc);
bulkUnsharded.insert(doc);
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js
index 7046ebac1edef..5cac3c4aa9ed5 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js
@@ -11,23 +11,15 @@
* requires_fcv_51,
* ]
*/
-'use strict';
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js';
const numValues = 10;
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
-load('jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js');
-
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.init = function(db, collName, connCache) {
- if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db) &&
- TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db)) {
- this.featureFlagDisabled = false;
- } else {
- jsTestLog(
- "Skipping executing this test as the requisite feature flags are not enabled.");
- }
-
$super.states.init(db, collName);
};
@@ -46,26 +38,27 @@ var $config = extendWorkload($config, function($config, $super) {
};
$config.states.update = function(db, collName, connCache) {
- if (this.featureFlagDisabled) {
- return;
- }
-
const shardedColl = db[collName];
- const unshardedColl = db[this.nonShardCollName];
- const updateField = "tid" + this.tid;
+ const updateField = this.metaField + ".tid" + this.tid;
const oldValue = Random.randInt(numValues);
// Updates some measurements along the field owned by this thread in both sharded and
// unsharded ts collections.
jsTestLog("Executing update state on: " + collName + " on field " + updateField);
- assertAlways.commandWorked(
- shardedColl.update({[this.metaField]: {[updateField]: {$gte: oldValue}}},
- {$inc: {[this.metaField + "." + updateField]: 1}},
- {multi: true}));
- assertAlways.commandWorked(
- unshardedColl.update({[this.metaField]: {[updateField]: {$gte: oldValue}}},
- {$inc: {[this.metaField + "." + updateField]: 1}},
- {multi: true}));
+ assertAlways.commandWorked(shardedColl.update(
+ {[updateField]: {$gte: oldValue}}, {$inc: {[updateField]: 1}}, {multi: true}));
+ };
+
+ $config.data.validateCollection = function validate(db, collName) {
+        // Since we can't use a 'snapshot' read concern for timeseries updates, updates on the
+        // sharded collection may not see exactly the same records as updates on the non-sharded
+        // collection, so the validation needs to be more lenient.
+ const count = db[collName].find().itcount();
+ const countNonSharded = db[this.nonShardCollName].find().itcount();
+ assertAlways.eq(
+ count,
+ countNonSharded,
+ "Expected sharded collection to have the same number of records as unsharded");
};
$config.transitions = {
@@ -75,5 +68,14 @@ var $config = extendWorkload($config, function($config, $super) {
moveChunk: {insert: 0.4, moveChunk: 0.1, update: 0.5},
};
+ // Reduced iteration and document counts to avoid timeouts.
+ $config.iterations = 20;
+
+ // Five minutes.
+ $config.data.increment = 1000 * 60 * 5;
+
+ // This should generate documents for a span of one month.
+ $config.data.numInitialDocs = 12 * 24 * 30;
+
return $config;
});
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
index cd7831128f7bf..26fa4f8be1c1d 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs updates that will change a document's shard key while migrating chunks. Uses both
* retryable writes and multi-statement transactions.
@@ -10,11 +8,11 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 5;
$config.iterations = 50;
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js
index 1f258d7614deb..f769dd4563bab 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Performs updates that will change a document's shard key while migrating chunks and killing
* sessions. Only runs updates that cause a document to change shards to increase the odds of
@@ -11,9 +9,11 @@
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js";
load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession
-load('jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js');
load('jstests/libs/override_methods/retry_on_killed_session.js');
// By default retry_on_killed_session.js will only retry known retryable operations like reads and
@@ -21,7 +21,7 @@ load('jstests/libs/override_methods/retry_on_killed_session.js');
// into always retrying killed operations.
TestData.alwaysRetryOnKillSessionErrors = true;
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.retryOnKilledSession = true;
// The base workload uses connCache, so wrap killSessions so the fsm runner doesn't complain
diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js
index 87b71758213d8..ea01ba0808e81 100644
--- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js
+++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs refineCollectionShardKey and CRUD operations concurrently.
*
@@ -8,7 +6,7 @@
load('jstests/libs/parallelTester.js');
-var $config = (function() {
+export const $config = (function() {
// The organization of documents in every collection is as follows:
//
// (i) Reserved for find: {tid: tid, a: 0, b: 0} -->> {tid: tid, a: 24, b: 24}
diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js
index fc8181fda9b8f..7f70e9d13f5e0 100644
--- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js
+++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js
@@ -1,15 +1,15 @@
-'use strict';
-
/**
* Same as the base workload, but refines to a nested shard key.
*
* @tags: [requires_persistence, requires_sharding]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
// Note the base workload assumes this is the nested key when constructing the crud ops.
$config.data.newShardKey = {a: 1, "b.c": 1};
$config.data.usingNestedKey = true;
diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js
index 5123475ecbfbf..535bfa67eeea4 100644
--- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js
+++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js
@@ -1,15 +1,15 @@
-'use strict';
-
/**
* Same as the base workload, but refines to a nested shard key.
*
* @tags: [requires_persistence, requires_sharding]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.newShardKey = {a: 1, "b.c": 1};
$config.data.newShardKeyFields = ["a", "b.c"];
return $config;
diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js
index 9323b64edb1ca..79fb1416fa1a6 100644
--- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js
+++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Runs refineCollectionShardKey and zone operations concurrently.
*
@@ -20,7 +18,7 @@
load('jstests/libs/parallelTester.js');
-var $config = (function() {
+export const $config = (function() {
var data = {
oldShardKeyField: 'a',
newShardKeyFields: ['a', 'b'],
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
index 1fe4393fe35e7..ce62e01798985 100644
--- a/jstests/concurrency/fsm_workloads/reindex.js
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* reindex.js
*
@@ -10,7 +8,7 @@
*/
load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js");
-var $config = (function() {
+export const $config = (function() {
var data = {
nIndexes: 4 + 1, // 4 created and 1 for _id.
nDocumentsToInsert: 1000,
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
index 575cf89400eba..336edc1a2638a 100644
--- a/jstests/concurrency/fsm_workloads/reindex_background.js
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* reindex_background.js
*
@@ -11,24 +9,23 @@
* @tags: [SERVER-40561, creates_background_indexes]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/reindex.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.prefix = 'reindex_background';
$config.states.createIndexes = function createIndexes(db, collName) {
const coll = db[this.threadCollName];
- const options = {background: true};
// The number of indexes created here is also stored in data.nIndexes.
- assertWorkedHandleTxnErrors(coll.createIndex({text: 'text'}, options),
+ assertWorkedHandleTxnErrors(coll.createIndex({text: 'text'}),
ErrorCodes.IndexBuildAlreadyInProgress);
- assertWorkedHandleTxnErrors(coll.createIndex({geo: '2dsphere'}, options),
+ assertWorkedHandleTxnErrors(coll.createIndex({geo: '2dsphere'}),
ErrorCodes.IndexBuildAlreadyInProgress);
- assertWorkedHandleTxnErrors(coll.createIndex({integer: 1}, options),
+ assertWorkedHandleTxnErrors(coll.createIndex({integer: 1}),
ErrorCodes.IndexBuildAlreadyInProgress);
- assertWorkedHandleTxnErrors(coll.createIndex({"$**": 1}, options),
+ assertWorkedHandleTxnErrors(coll.createIndex({"$**": 1}),
ErrorCodes.IndexBuildAlreadyInProgress);
};
diff --git a/jstests/concurrency/fsm_workloads/reindex_writeconflict.js b/jstests/concurrency/fsm_workloads/reindex_writeconflict.js
index 36818b04fe0df..20d5f3533e29f 100644
--- a/jstests/concurrency/fsm_workloads/reindex_writeconflict.js
+++ b/jstests/concurrency/fsm_workloads/reindex_writeconflict.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* reindex_writeconflict.js
*
* Ensures reIndex successfully handles WriteConflictExceptions.
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
reIndex: function reIndex(db, collName) {
var res = db[collName].reIndex();
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index 8ef93b538c9d4..1abb9530460c0 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* remove_and_bulk_insert.js
*
@@ -9,7 +7,7 @@
* This workload was designed to reproduce SERVER-20512, where a record in an evicted page was
* accessed after a WriteConflictException occurred in Collection::deleteDocument().
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
insert: function insert(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
index c349dc2087487..3ddbebd3ea097 100644
--- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* remove_multiple_documents.js
*
@@ -12,7 +10,7 @@
*
* @tags: [assumes_balancer_off]
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName) {
this.numDocs = 200;
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index ce5660c9fe065..148d087a3e367 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* remove_single_document.js
*
@@ -7,7 +5,7 @@
*
* @tags: [assumes_balancer_off]
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
remove: function remove(db, collName) {
// try removing a random document
diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js
index 3565a77d62f00..e342a775644d6 100644
--- a/jstests/concurrency/fsm_workloads/remove_where.js
+++ b/jstests/concurrency/fsm_workloads/remove_where.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* remove_where.js
*
@@ -13,10 +11,10 @@
* @tags: [assumes_balancer_off]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.randomBound = 10;
$config.data.generateDocumentToInsert = function generateDocumentToInsert() {
return {tid: this.tid, x: Random.randInt(this.randomBound)};
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index 1725314a07704..865cf85ebaff0 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_capped_collection_chain.js
*
@@ -10,7 +8,7 @@
* @tags: [requires_capped]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index 5110802cd20aa..bdb080f60054b 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_capped_collection_dbname_chain.js
*
@@ -14,7 +12,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index 8933f45b12474..026e31b92c049 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_capped_collection_dbname_droptarget.js
*
@@ -14,7 +12,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index f27afe5120802..b724766c5a7fc 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_capped_collection_droptarget.js
*
@@ -10,7 +8,7 @@
* @tags: [requires_capped]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
index c9a34ce850a34..fe54fe130238d 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_collection_chain.js
*
@@ -12,7 +10,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index e12bc1e4eef05..1c66aad7e48ba 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_collection_dbname_chain.js
*
@@ -13,7 +11,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index 51b6a82ec9d7a..f718698ad61d8 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_collection_dbname_droptarget.js
*
@@ -12,7 +10,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 9888f6a040d84..98098d84e7064 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* rename_collection_droptarget.js
*
@@ -12,7 +10,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the collection name,
// since the workload name is assumed to be unique.
diff --git a/jstests/concurrency/fsm_workloads/rename_sharded_collection.js b/jstests/concurrency/fsm_workloads/rename_sharded_collection.js
index 49152c3dcbbc1..594dcc1a519da 100644
--- a/jstests/concurrency/fsm_workloads/rename_sharded_collection.js
+++ b/jstests/concurrency/fsm_workloads/rename_sharded_collection.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform continuous renames on 3 collections per database, with the objective to verify that:
* - Upon successful renames, no data are lost
@@ -97,7 +95,7 @@ function checkExceptionHasBeenThrown(db, exceptionCode) {
assert.gte(count, 1, 'No exception with error code ' + exceptionCode + ' has been thrown');
}
-var $config = (function() {
+export const $config = (function() {
let states = {
rename: function(db, collName, connCache) {
const dbName = getRandomDbName(this.threadCount);
diff --git a/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js b/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js
index 28a0fb258a5de..a39ab11fe0fef 100644
--- a/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js
+++ b/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* Runs reshardCollection and CRUD operations concurrently.
*
* @tags: [requires_sharding]
*/
-var $config = (function() {
+export const $config = (function() {
const shardKeys = [
{a: 1},
{b: 1},
diff --git a/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js b/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
index 4e6d69da704d2..a088972dfdebc 100644
--- a/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
+++ b/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js
@@ -11,7 +11,7 @@
"use strict";
-var $config = (function() {
+export const $config = (function() {
function setup(db, collName) {
for (let i = 0; i < 200; ++i) {
assertAlways.commandWorked(
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads.js b/jstests/concurrency/fsm_workloads/secondary_reads.js
index 403773c2e4a45..1d6498f0ab5a0 100644
--- a/jstests/concurrency/fsm_workloads/secondary_reads.js
+++ b/jstests/concurrency/fsm_workloads/secondary_reads.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* secondary_reads.js
*
@@ -20,7 +18,7 @@
* @tags: [requires_replication, uses_write_concern]
*/
-var $config = (function() {
+export const $config = (function() {
// Use the workload name as the collection name.
var uniqueCollectionName = 'secondary_reads';
diff --git a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
index b3d7224a40d48..067a5721e2ccb 100644
--- a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
+++ b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js
@@ -1,8 +1,3 @@
-'use strict';
-
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config
-
/**
* secondary_reads_with_catalog_changes.js
*
@@ -27,7 +22,11 @@ load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config
* uses_write_concern,
* ]
*/
-var $config = extendWorkload($config, function($config, $super) {
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/secondary_reads.js";
+load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
+
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.buildIndex = function buildIndex(db, spec) {
// Index must be built eventually.
assertWhenOwnColl.soon(() => {
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
index 0c95bfe0ff371..d0ffe0403ee8c 100644
--- a/jstests/concurrency/fsm_workloads/server_status.js
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* server_status.js
*
* Simply checks that the serverStatus command works
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
status: function status(db, collName) {
var opts =
diff --git a/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js b/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js
index 5d60c79b58054..8b342a79b2a60 100644
--- a/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js
+++ b/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Run serverStatus() while running a large number of queries which are expected to reach maxTimeMS
* and time out.
@@ -12,7 +10,7 @@
*/
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = (function() {
+export const $config = (function() {
const states = {
/**
* This is a no-op, used only as a transition state.
diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
index a466f3bb18ba9..b8ebca24fe281 100644
--- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Provides an init state that partitions the data space into chunks evenly across threads.
*
@@ -27,7 +25,7 @@
load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers
load("jstests/sharding/libs/find_chunks_util.js");
-var $config = (function() {
+export const $config = (function() {
var data = {
partitionSize: 1,
// We use a non-hashed shard key of { _id: 1 } so that documents reside on their expected
diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
index 5b29d413bbf04..9fa5ad5fb8138 100644
--- a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Extends sharded_base_partitioned.js.
*
@@ -12,11 +10,13 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js";
load("jstests/sharding/libs/find_chunks_util.js");
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.iterations = 8;
$config.threadCount = 5;
diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js
index 9ce7dc23a7948..1d5247b9fe2f6 100644
--- a/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Extends sharded_mergeChunks_partitioned.js.
*
@@ -9,12 +7,14 @@
* @tags: [requires_sharding, assumes_balancer_off, does_not_support_stepdowns]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js";
load("jstests/sharding/libs/find_chunks_util.js");
load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js");
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.iterations = 6;
$config.threadCount = 5;
diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
index d5313a25776df..e3983380abd3e 100644
--- a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Extends sharded_base_partitioned.js.
*
@@ -12,10 +10,12 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.iterations = 5;
$config.threadCount = 5;
diff --git a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
index 2f20353213824..746380a2e4367 100644
--- a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
+++ b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Extends sharded_base_partitioned.js.
*
@@ -12,10 +10,12 @@
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.iterations = 5;
$config.threadCount = 5;
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js
index 78cc71e87a7f3..635a70fb20ebe 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform point-in-time snapshot reads that span a 'find' and multiple 'getmore's concurrently with
* CRUD operations, after initial insert operations. This tests that the effects of concurrent CRUD
@@ -17,7 +15,8 @@
*/
load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js');
-var $config = (function() {
+
+export const $config = (function() {
const data = {numIds: 100, numDocsToInsertPerThread: 5, batchSize: 10};
const states = {
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js
index 9f3ae16a19eb9..e6eda9a57b820 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform point-in-time snapshot reads that span a 'find' and multiple 'getmore's concurrently with
* CRUD operations.
@@ -13,7 +11,8 @@
*/
load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js');
-var $config = (function() {
+
+export const $config = (function() {
const data = {numIds: 100, numDocsToInsertPerThread: 5, batchSize: 10};
const states = {
@@ -78,7 +77,7 @@ var $config = (function() {
},
createIndex: function createIndex(db, collName) {
- db[collName].createIndex({a: 1}, {background: true});
+ db[collName].createIndex({a: 1});
},
dropIndex: function dropIndex(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
index 5b89d01d29fdf..253c53a089302 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Perform snapshot reads that span a find and a getmore concurrently with CRUD operations. The
* snapshot reads and CRUD operations will all contend for locks on db and collName. Since the
@@ -11,7 +9,8 @@
*/
load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js');
-var $config = (function() {
+
+export const $config = (function() {
const data = {numIds: 100, numDocsToInsertPerThread: 5, valueToBeInserted: 1, batchSize: 50};
const states = {
@@ -106,7 +105,7 @@ var $config = (function() {
},
createIndex: function createIndex(db, collName) {
- db[collName].createIndex({value: 1}, {background: true});
+ db[collName].createIndex({value: 1});
},
dropIndex: function dropIndex(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
index fbf9de53ad8b3..94328bac6648d 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Test a snapshot read spanning a find and getmore that runs concurrently with
* killOp and txnNumber change.
@@ -9,10 +7,12 @@
* @tags: [uses_transactions, state_functions_share_transaction]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.transitions = {
init: {snapshotFind: 1.0},
snapshotFind: {incrementTxnNumber: 0.33, killOp: 0.34, snapshotGetMore: 0.33},
diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
index ccefd552a3f0b..fedbca1fc7f69 100644
--- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
+++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Test a snapshot read spanning a find and getmore that runs concurrently with killSessions,
* killOp, killCursors, and txnNumber change.
@@ -9,7 +7,7 @@
load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js');
-var $config = (function() {
+export const $config = (function() {
const data = {numIds: 100, batchSize: 50};
const states = {
diff --git a/jstests/concurrency/fsm_workloads/timeseries_agg_out.js b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js
new file mode 100644
index 0000000000000..fc4b194fc31c1
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js
@@ -0,0 +1,125 @@
+/**
+ * This test runs many concurrent aggregations using $out, writing to the same time-series
+ * collection. While this is happening, other threads may be creating or dropping indexes, changing
+ * the collection options, or sharding the collection. We expect an aggregate with a $out stage to
+ * fail if another client executed one of these changes between the creation of $out's temporary
+ * collection and the eventual rename to the target collection.
+ *
+ * Unfortunately, there aren't very many assertions we can make here, so this is mostly to test that
+ * the server doesn't deadlock or crash, and that temporary namespaces are cleaned up.
+ *
+ * @tags: [
+ * requires_timeseries,
+ * does_not_support_transactions,
+ * does_not_support_stepdowns,
+ * requires_fcv_71,
+ * featureFlagAggOutTimeseries
+ * ]
+ */
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/agg_out.js';
+
+export const $config = extendWorkload($baseConfig, function($config, $super) {
+ const timeFieldName = 'time';
+ const metaFieldName = 'tag';
+ const numDocs = 100;
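+    // Namespace that $out writes to, and the shard key used when that collection gets sharded.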
+ $config.data.outputCollName = 'timeseries_agg_out';
+ $config.data.shardKey = {[metaFieldName]: 1};
+
+ /**
+ * Runs an aggregate with a $out with time-series into '$config.data.outputCollName'.
+ */
+ $config.states.query = function query(db, collName) {
+ const res = db[collName].runCommand({
+ aggregate: collName,
+ pipeline: [
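+                // Stamp every document with a current value for the time field before it is
+                // written to the time-series target collection.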
+ {$set: {"time": new Date()}},
+ {
+ $out: {
+ db: db.getName(),
+ coll: this.outputCollName,
+ timeseries: {timeField: timeFieldName, metaField: metaFieldName}
+ }
+ }
+ ],
+ cursor: {}
+ });
+
+ const allowedErrorCodes = [
+ ErrorCodes.CommandFailed, // indexes of target collection changed during processing.
+ ErrorCodes.IllegalOperation, // $out is not supported to an existing *sharded* output
+ // collection.
+ 17152, // namespace is capped so it can't be used for $out.
+ 28769, // $out collection cannot be sharded.
+ ErrorCodes.NamespaceExists, // $out tries to create a view when a buckets collection
+ // already exists. This error is not caught because the
+ // view is being dropped by a previous thread.
+ ];
+ assertWhenOwnDB.commandWorkedOrFailedWithCode(res, allowedErrorCodes);
+ if (res.ok) {
+ const cursor = new DBCommandCursor(db, res);
+ assertAlways.eq(0, cursor.itcount()); // No matter how many documents were in the
+ // original input stream, $out should never return any results.
+ }
+ };
+
+ /**
+ * Changes the 'expireAfterSeconds' value for the time-series collection.
+ */
+ $config.states.collMod = function collMod(db, unusedCollName) {
+ let expireAfterSeconds = "off";
+ if (Random.rand() < 0.5) {
+ // Change the expireAfterSeconds
+ expireAfterSeconds = Random.rand();
+ }
+
+ assertWhenOwnDB.commandWorkedOrFailedWithCode(
+ db.runCommand({collMod: this.outputCollName, expireAfterSeconds: expireAfterSeconds}),
+ [ErrorCodes.ConflictingOperationInProgress, ErrorCodes.NamespaceNotFound]);
+ };
+
+ /**
+ * 'convertToCapped' should always fail with a 'CommandNotSupportedOnView' error.
+ */
+ $config.states.convertToCapped = function convertToCapped(db, unusedCollName) {
+ if (isMongos(db)) {
+ return; // convertToCapped can't be run against a mongos.
+ }
+ assertWhenOwnDB.commandFailedWithCode(
+ db.runCommand({convertToCapped: this.outputCollName, size: 100000}),
+ ErrorCodes.CommandNotSupportedOnView);
+ };
+
+ $config.teardown = function teardown(db) {
+ const collNames = db.getCollectionNames();
+ // Ensure that a temporary collection is not left behind.
+ assertAlways.eq(db.getCollectionNames()
+ .filter(col => col.includes('system.buckets.tmp.agg_out'))
+ .length,
+ 0);
+
+ // Ensure that for the buckets collection there is a corresponding view.
+ assertAlways(!(collNames.includes('system.buckets.timeseries_agg_out') &&
+ !collNames.includes('timeseries_agg_out')));
+ };
+
+ /**
+ * Create a time-series collection and insert 100 documents.
+ */
+ $config.setup = function setup(db, collName, cluster) {
+ db[collName].drop();
+ assertWhenOwnDB.commandWorked(db.createCollection(
+ collName, {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+ const docs = [];
+ for (let i = 0; i < numDocs; ++i) {
+ docs.push({
+ [timeFieldName]: ISODate(),
+ [metaFieldName]: (this.tid * numDocs) + i,
+ });
+ }
+ assertWhenOwnDB.commandWorked(
+ db.runCommand({insert: collName, documents: docs, ordered: false}));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
new file mode 100644
index 0000000000000..bf1c01c5f2b94
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
@@ -0,0 +1,93 @@
+/**
+ * Tests $out stage of aggregate command with time-series collections concurrently with killOp.
+ * Ensures that all the temporary collections created during the aggregate command are deleted and
+ * that all buckets collection have a corresponding view. This workloads extends
+ * 'agg_out_interrupt_cleanup'.
+ *
+ * @tags: [
+ * requires_timeseries,
+ * does_not_support_transactions,
+ * does_not_support_stepdowns,
+ * uses_curop_agg_stage,
+ * requires_fcv_71,
+ * featureFlagAggOutTimeseries
+ * ]
+ */
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js";
+
+export const $config = extendWorkload($baseConfig, function($config, $super) {
+ const timeFieldName = 'time';
+ const metaFieldName = 'tag';
+ const numDocs = 100;
+
+ $config.states.aggregate = function aggregate(db, collName) {
+ // Drop the view to ensure that, each time a buckets collection is made, the view is
+ // also made, or else both are destroyed.
+ assert(db["interrupt_temp_out"].drop());
+ // $out to the same collection so that concurrent aggregate commands would cause congestion.
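+ // The command result is deliberately not asserted on; the concurrent killOp state may
+ // interrupt this aggregation at any stage.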
+ db[collName].runCommand({
+ aggregate: collName,
+ pipeline: [{
+ $out: {
+ db: db.getName(),
+ coll: "interrupt_temp_out",
+ timeseries: {timeField: timeFieldName, metaField: metaFieldName}
+ }
+ }],
+ cursor: {}
+ });
+ };
+
+ $config.states.killOp = function killOp(db, collName) {
+ // The aggregate command could be running different commands internally (renameCollection,
+ // insertDocument, etc.) depending on which stage of execution it is in. So, get all the
+ // operations that are running against the input, output or temp collections.
+ $super.data.killOpsMatchingFilter(db, {
+ op: "command",
+ active: true,
+ $or: [
+ {"ns": db.getName() + ".interrupt_temp_out"}, // For the view.
+ {"ns": db.getName() + "." + collName}, // For input collection.
+ // For the tmp collection.
+ {"ns": {$regex: "^" + db.getName() + "\.system.buckets\.tmp\.agg_out.*"}}
+ ],
+ "command.drop": {
+ $exists: false
+ } // Exclude the 'drop' command from the filter to make sure that we don't kill the
+ // drop command, which is responsible for dropping the temporary collection.
+ });
+ };
+
+ $config.teardown = function teardown(db) {
+ const collNames = db.getCollectionNames();
+ // Ensure that a temporary collection is not left behind.
+ assertAlways.eq(
+ collNames.filter(coll => coll.includes('system.buckets.tmp.agg_out')).length, 0);
+
+ // Ensure that for the buckets collection there is a corresponding view.
+ assertAlways(!(collNames.includes('system.buckets.interrupt_temp_out') &&
+ !collNames.includes('interrupt_temp_out')));
+ };
+
+ /**
+ * Create a time-series collection and insert 100 documents.
+ */
+ $config.setup = function setup(db, collName, cluster) {
+ db[collName].drop();
+ assert.commandWorked(db.createCollection(
+ collName, {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+ const docs = [];
+ for (let i = 0; i < numDocs; ++i) {
+ docs.push({
+ [timeFieldName]: ISODate(),
+ [metaFieldName]: (this.tid * numDocs) + i,
+ });
+ }
+ assert.commandWorked(db.runCommand({insert: collName, documents: docs, ordered: false}));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js b/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js
index d406cd34802d9..67efd3dfb9ee6 100644
--- a/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js
+++ b/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests read and write operations concurrent with granularity updates on sharded time-series
* collection.
@@ -11,9 +9,9 @@
* ]
*/
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
-var $config = (function() {
+export const $config = (function() {
const shardedCollName = i => `sharded_${i}`;
const unshardedCollName = i => `unsharded_${i}`;
const collCount = 50;
diff --git a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js
index c766b42265ada..fdca16127bec1 100644
--- a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js
+++ b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* timeseries_deletes_and_inserts.js
*
@@ -17,7 +15,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
const data = {
logColl: "deletes_and_inserts_log",
nReadingsPerSensor: 100,
diff --git a/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js b/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js
index eeceb57ae5109..2e7f96c1e34c3 100644
--- a/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js
+++ b/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests concurrent time-series inserts, with enough batches and data to force buckets to be closed
* due to the memory usage threshold.
@@ -8,15 +6,10 @@
* requires_timeseries,
* # Timeseries do not support multi-document transactions with inserts.
* does_not_support_transactions,
- * # Stepdowns can cause inserts to fail in some cases in sharded passthroughs and not be
- * # automatically retried. We aren't sure of the root cause yet, but we are excluding this tests
- * # from those suites for now.
- * # TODO (SERVER-67609): Remove this tag, or update the explanation above.
- * does_not_support_stepdowns,
* ]
*/
-var $config = (function() {
+export const $config = (function() {
const timeFieldName = 'time';
const metaFieldName = 'tag';
const numDocs = 100;
diff --git a/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js b/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js
index 16c21363d9470..682865258cb58 100644
--- a/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js
+++ b/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* Tests killing time-series inserts.
*
@@ -14,7 +12,7 @@
* ]
*/
-var $config = (function() {
+export const $config = (function() {
const timeFieldName = 'time';
const metaFieldName = 'tag';
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index e07300d6c5c62..c1a5dcc7a226a 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_and_bulk_insert.js
*
@@ -10,7 +8,7 @@
* we attempted to make a copy of a record after a WriteConflictException occurred in
* Collection::updateDocument().
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
insert: function insert(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index e0b802f4e611c..d3f77daf40133 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_array.js
*
@@ -13,7 +11,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
// db: explicitly passed to avoid accidentally using the global `db`
// res: WriteResult
diff --git a/jstests/concurrency/fsm_workloads/update_array_noindex.js b/jstests/concurrency/fsm_workloads/update_array_noindex.js
index c61116843807d..f6b7b31677f1e 100644
--- a/jstests/concurrency/fsm_workloads/update_array_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_array_noindex.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_array_noindex.js
*
* Executes the update_array.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_array.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index f960f2abf831f..6c03bef33d1dc 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* update_check_index.js
*
* Ensures that concurrent multi updates cannot produce duplicate index entries. Regression test
* for SERVER-17132.
*/
-var $config = (function() {
+export const $config = (function() {
var states = (function() {
function multiUpdate(db, collName) {
// Set 'c' to some random value.
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index 5e71dd40d7081..1ae5dcaa05b21 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_inc.js
*
@@ -12,7 +10,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
var data = {
// uses the workload name as _id on the document.
// assumes this name will be unique.
@@ -34,6 +32,7 @@ var $config = (function() {
var updateDoc = this.getUpdateArgument(this.fieldName);
var res = db[collName].update({_id: this.id}, updateDoc);
+ assert.commandWorked(res);
assertAlways.eq(0, res.nUpserted, tojson(res));
if (isMongod(db)) {
diff --git a/jstests/concurrency/fsm_workloads/update_inc_capped.js b/jstests/concurrency/fsm_workloads/update_inc_capped.js
index fb37c1ee027b4..37285c7b7635d 100644
--- a/jstests/concurrency/fsm_workloads/update_inc_capped.js
+++ b/jstests/concurrency/fsm_workloads/update_inc_capped.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_inc_capped.js
*
* Executes the update_inc.js workload on a capped collection.
* @tags: [requires_capped]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_inc.js";
load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
-var $config = extendWorkload($config, makeCapped);
+export const $config = extendWorkload($baseConfig, makeCapped);
diff --git a/jstests/concurrency/fsm_workloads/update_inc_pipeline.js b/jstests/concurrency/fsm_workloads/update_inc_pipeline.js
index 95fdc674ab1ad..712d70a2fd126 100644
--- a/jstests/concurrency/fsm_workloads/update_inc_pipeline.js
+++ b/jstests/concurrency/fsm_workloads/update_inc_pipeline.js
@@ -1,15 +1,13 @@
-'use strict';
-
/**
* update_inc_pipeline.js
*
* This is the same workload as update_inc.js, but substitutes a $mod-style update with a
* pipeline-style one.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_inc.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.getUpdateArgument = function getUpdateArgument(fieldName) {
return [{$set: {[fieldName]: {$add: ["$" + fieldName, 1]}}}];
};
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index 02b2e6962abab..a657d63e9735c 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_multifield.js
*
@@ -10,7 +8,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
function makeQuery(options) {
var query = {};
if (!options.multi) {
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index c33451fe28e2b..f82882449fd55 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -1,39 +1,49 @@
-'use strict';
-
/**
* update_multifield_multiupdate.js
*
* Does updates that affect multiple fields on multiple documents.
* The collection has an index for each field, and a multikey index for all fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-
-// For isMongod
-load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.multi = true;
-
- $config.data.assertResult = function(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
-
- if (isMongod(db)) {
- // If a document's RecordId cannot change, then we should not have updated any document
- // more than once, since the update stage internally de-duplicates based on RecordId.
- assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
- } else { // mongos
- assertAlways.gte(res.nMatched, 0, tojson(res));
- }
-
- assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
-
- var docs = db[collName].find().toArray();
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- };
-
- return $config;
-});
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_multifield.js";
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // For isMongod
+
+export const $config = extendWorkload($baseConfig,
+ function($config, $super) {
+ $config.data.multi = true;
+
+ $config.data.assertResult = function(
+ res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+
+ if (isMongod(db)) {
+ // If a document's RecordId cannot change, then we
+ // should not have updated any document more than
+ // once, since the update stage internally
+ // de-duplicates based on RecordId.
+ assertWhenOwnColl.lte(
+ this.numDocs, res.nMatched, tojson(res));
+ } else { // mongos
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+
+ assertWhenOwnColl.eq(
+ res.nMatched, res.nModified, tojson(res));
+
+ if (TestData.runningWithBalancer !== true) {
+ var docs = db[collName].find().toArray();
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq(
+     'number',
+     typeof doc.z,
+     `The query is ${tojson(query)}, and doc is ${tojson(doc)}, the number of all docs is ${
+         docs.length}. The response of update is ${tojson(res)}, and config multi is ${
+         $config.data.multi.toString()}`);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ }
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
index ea0cc4fc4b770..4075d424299ff 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
@@ -1,13 +1,13 @@
-'use strict';
-
/**
* update_multifield_multiupdate_noindex.js
*
* Executes the update_multifield_multiupdate.js workload after dropping all
* non-_id indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js";
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
index 0f15037c56874..e56c348ba63c6 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_multifield_noindex.js
*
* Executes the update_multifield.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_multifield.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index 863a9deac7c66..f6af33ebc2088 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_ordered_bulk_inc.js
*
@@ -14,7 +12,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
var states = {
init: function init(db, collName) {
this.fieldName = 't' + this.tid;
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index 26c52371ac90a..b6c22732b3cbc 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* update_rename.js
*
* Each thread does a $rename to cause documents to jump between indexes.
*/
-var $config = (function() {
+export const $config = (function() {
var fieldNames = ['update_rename_x', 'update_rename_y', 'update_rename_z'];
function choose(array) {
diff --git a/jstests/concurrency/fsm_workloads/update_rename_noindex.js b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
index 96af5a8f1cc2c..34be43de205ca 100644
--- a/jstests/concurrency/fsm_workloads/update_rename_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_rename_noindex.js
*
* Executes the update_rename.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_rename.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index aad7feb171695..0602c99670c01 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_replace.js
*
@@ -10,7 +8,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
// explicitly pass db to avoid accidentally using the global `db`
function assertResult(db, res) {
assertAlways.eq(0, res.nUpserted, tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/update_replace_noindex.js b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
index 14dc0b16e2a5b..90ae75dc52e81 100644
--- a/jstests/concurrency/fsm_workloads/update_replace_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_replace_noindex.js
*
* Executes the update_replace.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_replace.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index f530adbe78e27..3b952e8699a9e 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_simple.js
*
@@ -12,7 +10,7 @@
// For isMongod.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = (function() {
+export const $config = (function() {
var states = {
set: function set(db, collName) {
this.setOrUnset(db, collName, true, this.numDocs);
diff --git a/jstests/concurrency/fsm_workloads/update_simple_noindex.js b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
index f255967b6145e..f636b9871297b 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
@@ -1,13 +1,11 @@
-'use strict';
-
/**
* update_simple_noindex.js
*
* Executes the update_simple.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_simple.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_upsert.js b/jstests/concurrency/fsm_workloads/update_upsert.js
index f0b4da763eafb..d31df50eba554 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* update_upsert_multi.js
*
* Tests updates that specify upsert=true.
*/
-var $config = (function() {
+export const $config = (function() {
let states = {
update: function update(db, collName) {
const docId = Random.randInt(5) * 4;
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 12825d51702fc..7a93b41892509 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_upsert_multi.js
*
@@ -11,7 +9,7 @@
*
* @tags: [requires_non_retryable_writes]
*/
-var $config = (function() {
+export const $config = (function() {
var states = {
insert: function insert(db, collName) {
var query, update, options;
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
index be943cf7b02cb..86b76a323abe4 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_upsert_multi_noindex.js
*
@@ -8,8 +6,8 @@
*
* @tags: [requires_non_retryable_writes]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_upsert_multi.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
-var $config = extendWorkload($config, dropAllIndexes);
+export const $config = extendWorkload($baseConfig, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index c5526f8205c69..0add009504a4c 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* update_where.js
*
@@ -7,10 +5,10 @@
* thread and updates them. Also queries by the thread that created the documents to verify counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.randomBound = 10;
$config.data.generateDocumentToInsert = function generateDocumentToInsert() {
return {tid: this.tid, x: Random.randInt(this.randomBound)};
diff --git a/jstests/concurrency/fsm_workloads/upsert_unique_index.js b/jstests/concurrency/fsm_workloads/upsert_unique_index.js
index 15a7d1b14fcf0..61c39f3dfeba6 100644
--- a/jstests/concurrency/fsm_workloads/upsert_unique_index.js
+++ b/jstests/concurrency/fsm_workloads/upsert_unique_index.js
@@ -1,11 +1,9 @@
-'use strict';
-
/**
* Performs concurrent upsert and delete operations against a small set of documents with a unique
* index in place. One specific scenario this test exercises is upsert retry in the case where an
* upsert generates an insert, which then fails due to another operation inserting first.
*/
-var $config = (function() {
+export const $config = (function() {
const data = {
numDocs: 4,
getDocValue: function() {
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
index 522fef6b32f9d..8ad43bed5085a 100644
--- a/jstests/concurrency/fsm_workloads/upsert_where.js
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* upsert_where.js
*
@@ -7,10 +5,10 @@
* updates it, and queries by the thread that created the documents to verify counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.data.randomBound = 10;
$config.data.generateDocumentToInsert = function generateDocumentToInsert() {
return {tid: this.tid, x: Random.randInt(this.randomBound)};
diff --git a/jstests/concurrency/fsm_workloads/view_catalog.js b/jstests/concurrency/fsm_workloads/view_catalog.js
index 9557a9f93879e..5d09f22d4e4e1 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* view_catalog.js
*
@@ -7,7 +5,7 @@
* built on a shared underlying collection.
*/
-var $config = (function() {
+export const $config = (function() {
var data = {
// Use the workload name as a prefix for the view name, since the workload name is assumed
// to be unique.
@@ -80,18 +78,9 @@ var $config = (function() {
assertAlways.commandWorked(bulk.execute());
};
- // This test performs createCollection concurrently from many threads, and createCollection on a
- // sharded cluster takes a distributed lock. Since a distributed lock is acquired by repeatedly
- // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's
- // possible that some thread will be starved by the other threads and fail to grab the lock
- // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so
- // that threadCount * iterations < 40.
- // The threadCount and iterations can be increased once PM-697 ("Remove all usages of
- // distributed lock") is complete.
-
return {
- threadCount: 5,
- iterations: 5,
+ threadCount: 10,
+ iterations: 10,
data: data,
setup: setup,
states: states,
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
index 64d4a81b1f6f9..192ac75b565e7 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* view_catalog_cycle_lookup.js
*
@@ -10,7 +8,7 @@
* @tags: [requires_fcv_51]
*/
-var $config = (function() {
+export const $config = (function() {
// Use the workload name as a prefix for the view names, since the workload name is assumed
// to be unique.
const prefix = 'view_catalog_cycle_lookup_';
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
index ef5bd2d6028da..658fa64e397e6 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* view_catalog_cycle_with_drop.js
*
@@ -7,7 +5,7 @@
* underlying collection.
*/
-var $config = (function() {
+export const $config = (function() {
// Use the workload name as a prefix for the view names, since the workload name is assumed
// to be unique.
const prefix = 'view_catalog_cycle_with_drop_';
@@ -98,18 +96,9 @@ var $config = (function() {
}
}
- // This test performs createCollection concurrently from many threads, and createCollection on a
- // sharded cluster takes a distributed lock. Since a distributed lock is acquired by repeatedly
- // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's
- // possible that some thread will be starved by the other threads and fail to grab the lock
- // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so
- // that threadCount * iterations < 40.
- // The threadCount and iterations can be increased once PM-697 ("Remove all usages of
- // distributed lock") is complete.
-
return {
- threadCount: 5,
- iterations: 5,
+ threadCount: 10,
+ iterations: 10,
data: data,
states: states,
startState: 'readFromView',
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js b/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
index 6531268d2ed9e..52d0d8d844357 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
@@ -1,5 +1,3 @@
-'use strict';
-
/**
* view_catalog_direct_system_writes.js
*
@@ -7,10 +5,10 @@
* does so via direct writes to system.views instead of using the collMod or drop commands. Each
* worker operates on their own view, built on a shared underlying collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/view_catalog.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/view_catalog.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.create = function create(db, collName) {
this.counter++;
let pipeline = [{$match: {_id: this.counter}}];
diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js
index f601c9990d830..a9b2f1bbd4968 100644
--- a/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js
+++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js
@@ -1,29 +1,26 @@
-'use strict';
-
/**
* Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster.
*
* @tags: [
- * featureFlagUpdateOneWithoutShardKey,
- * requires_fcv_70,
+ * requires_fcv_71,
* requires_sharding,
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js";
// This workload does not make use of random moveChunks, but other workloads that extend this base
// workload may.
-load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/balancer.js');
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.threadCount = 10;
- $config.iterations = 10;
+ $config.iterations = 50;
$config.startState = "init"; // Inherited from random_moveChunk_base.js.
- $config.data.partitionSize = 100;
+ $config.data.partitionSize = 50;
$config.data.secondaryDocField = 'y';
+ $config.data.tertiaryDocField = 'tertiaryField';
$config.data.runningWithStepdowns =
TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns;
@@ -34,6 +31,13 @@ var $config = extendWorkload($config, function($config, $super) {
return Math.floor(Math.random() * (max - min + 1)) + min;
};
+ /**
+ * Returns a random boolean.
+ */
+ $config.data.generateRandomBool = function generateRandomBool() {
+ return Math.random() > 0.5;
+ };
+
/**
* Generates a random document.
*/
@@ -67,7 +71,7 @@ var $config = extendWorkload($config, function($config, $super) {
* mean the query could target a variable number of shards.
*/
$config.data.generateRandomQuery = function generateRandomQuery(db, collName) {
- const queryType = this.generateRandomInt(0, 2);
+ const queryType = this.generateRandomInt(0, 3);
if (queryType === 0 /* Range query on shard key field. */) {
return {
[this.defaultShardKeyField]:
@@ -78,6 +82,8 @@ var $config = extendWorkload($config, function($config, $super) {
[this.secondaryDocField]:
{$gte: this.partition.lower, $lte: this.partition.upper - 1}
};
+ } else if (queryType === 2 /* Equality query on a field that does not exist */) {
+ return {[this.tertiaryDocField]: {$eq: this.generateRandomInt(0, 500)}, tid: this.tid};
} else { /* Query any document in the partition. */
return {tid: this.tid};
}
@@ -93,10 +99,18 @@ var $config = extendWorkload($config, function($config, $super) {
const newValue = this.generateRandomInt(this.partition.lower, this.partition.upper - 1);
const updateType = this.generateRandomInt(0, 2);
const doShardKeyUpdate = this.generateRandomInt(0, 1);
+ const doUpsert = this.generateRandomBool();
// Used for validation after running the write operation.
const containsMatchedDocs = db[collName].findOne(query) != null;
+ jsTestLog("updateOne state running with the following parameters: \n" +
+ "query: " + tojson(query) + "\n" +
+ "updateType: " + updateType + "\n" +
+ "doShardKeyUpdate: " + doShardKeyUpdate + "\n" +
+ "doUpsert: " + doUpsert + "\n" +
+ "containsMatchedDocs: " + containsMatchedDocs);
+
let res;
try {
if (updateType === 0 /* Update operator document */) {
@@ -104,15 +118,17 @@ var $config = extendWorkload($config, function($config, $super) {
[doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]:
newValue
};
- res = db[collName].updateOne(query, {$set: update});
+ res = db[collName].updateOne(query, {$set: update}, {upsert: doUpsert});
} else if (updateType === 1 /* Replacement Update */) {
// Always including a shard key update for replacement documents in order to keep
// the new document within the current thread's partition.
- res = db[collName].replaceOne(query, {
- [this.defaultShardKeyField]: newValue,
- [this.secondaryDocField]: newValue,
- tid: this.tid
- });
+ res = db[collName].replaceOne(query,
+ {
+ [this.defaultShardKeyField]: newValue,
+ [this.secondaryDocField]: newValue,
+ tid: this.tid
+ },
+ {upsert: doUpsert});
} else { /* Aggregation pipeline update */
const update = {
[doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]:
@@ -121,7 +137,8 @@ var $config = extendWorkload($config, function($config, $super) {
// The $unset will result in a no-op since 'z' is not a field populated in any of
// the documents.
- res = db[collName].updateOne(query, [{$set: update}, {$unset: "z"}]);
+ res = db[collName].updateOne(
+ query, [{$set: update}, {$unset: "z"}], {upsert: doUpsert});
}
} catch (err) {
if (this.shouldSkipWriteResponseValidation(err)) {
@@ -136,6 +153,14 @@ var $config = extendWorkload($config, function($config, $super) {
assert.eq(res.matchedCount, 1, query);
} else {
assert.eq(res.matchedCount, 0, res);
+
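+ // When upsert: true and nothing matched, the upsert must have inserted exactly one
+ // document; verify it exists before removing it below.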
+ if (doUpsert) {
+ assert.neq(res.upsertedId, null, res);
+ assert.eq(db[collName].find({"_id": res.upsertedId}).itcount(), 1);
+
+ // Clean up, remove upserted document.
+ assert.commandWorked(db[collName].deleteOne({"_id": res.upsertedId}));
+ }
}
assert.contains(res.modifiedCount, [0, 1], res);
@@ -158,6 +183,8 @@ var $config = extendWorkload($config, function($config, $super) {
ErrorCodes.IncompleteTransactionHistory,
ErrorCodes.NoSuchTransaction,
ErrorCodes.StaleConfig,
+ ErrorCodes.ShardCannotRefreshDueToLocksHeld,
+ ErrorCodes.WriteConflict
];
// If we're running in a stepdown suite, then attempting to update the shard key may
@@ -190,10 +217,12 @@ var $config = extendWorkload($config, function($config, $super) {
}
// This is a possible transient transaction error issue that could occur with
- // concurrent moveChunks and transactions (if we happen to run a
+ // concurrent moveChunks and/or reshardings and transactions (if we happen to run a
// WouldChangeOwningShard update).
if (res.code === ErrorCodes.LockTimeout || res.code === ErrorCodes.StaleConfig ||
- res.code === ErrorCodes.ConflictingOperationInProgress) {
+ res.code === ErrorCodes.ConflictingOperationInProgress ||
+ res.code === ErrorCodes.ShardCannotRefreshDueToLocksHeld ||
+ res.code == ErrorCodes.WriteConflict) {
if (!msg.includes(otherErrorsInChangeShardKeyMsg)) {
return false;
}
@@ -241,34 +270,55 @@ var $config = extendWorkload($config, function($config, $super) {
// Used for validation after running the write operation.
const containsMatchedDocs = db[collName].findOne(query) != null;
+ // Only test sort when there are matching documents in the collection.
+ const doSort = containsMatchedDocs && this.generateRandomBool();
+ let sortDoc, sortVal;
+
+ // If sorting, ensure that the correct document is modified.
+ if (doSort) {
+ sortVal = {[this.secondaryDocField]: this.generateRandomInt(0, 1) === 0 ? -1 : 1};
+ sortDoc = db[collName].find(query).sort(sortVal)[0];
+ }
+
let res;
const findAndModifyType = this.generateRandomInt(0, 1);
if (findAndModifyType === 0 /* Update */) {
const newValue = this.generateRandomInt(this.partition.lower, this.partition.upper - 1);
const updateType = this.generateRandomInt(0, 2);
const doShardKeyUpdate = this.generateRandomInt(0, 1);
+ const doUpsert = this.generateRandomBool();
+
+ jsTestLog("findAndModifyUpdate state running with the following parameters: \n" +
+ "query: " + tojson(query) + "\n" +
+ "updateType: " + updateType + "\n" +
+ "doShardKeyUpdate: " + doShardKeyUpdate + "\n" +
+ "doUpsert: " + doUpsert + "\n" +
+ "doSort: " + doSort + "\n" +
+ "containsMatchedDocs: " + containsMatchedDocs);
+
+ const cmdObj = {
+ findAndModify: collName,
+ query: query,
+ upsert: doUpsert,
+ };
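+ // Object.assign ignores non-object sources, so the sort option is only added when
+ // doSort is true.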
+ Object.assign(cmdObj, doSort && {sort: sortVal});
+
if (updateType === 0 /* Update operator document */) {
const update = {
[doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]:
newValue
};
- res = db.runCommand({
- findAndModify: collName,
- query: query,
- update: {$set: update},
- });
+ cmdObj.update = {$set: update};
+ res = db.runCommand(cmdObj);
} else if (updateType === 1 /* Replacement Update */) {
// Always including a shard key update for replacement documents in order to
// keep the new document within the current thread's partition.
- res = db.runCommand({
- findAndModify: collName,
- query: query,
- update: {
- [this.defaultShardKeyField]: newValue,
- [this.secondaryDocField]: newValue,
- tid: this.tid
- },
- });
+ cmdObj.update = {
+ [this.defaultShardKeyField]: newValue,
+ [this.secondaryDocField]: newValue,
+ tid: this.tid
+ };
+ res = db.runCommand(cmdObj);
} else { /* Aggregation pipeline update */
const update = {
[doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]:
@@ -277,11 +327,8 @@ var $config = extendWorkload($config, function($config, $super) {
// The $unset will result in a no-op since 'z' is not a field populated in any
// of the documents.
- res = db.runCommand({
- findAndModify: collName,
- query: query,
- update: [{$set: update}, {$unset: "z"}],
- });
+ cmdObj.update = [{$set: update}, {$unset: "z"}];
+ res = db.runCommand(cmdObj);
}
if (this.shouldSkipWriteResponseValidation(res)) {
@@ -292,18 +339,35 @@ var $config = extendWorkload($config, function($config, $super) {
if (containsMatchedDocs) {
assert.eq(res.lastErrorObject.n, 1, res);
assert.eq(res.lastErrorObject.updatedExisting, true, res);
+ } else if (doUpsert) {
+ assert.eq(res.lastErrorObject.n, 1, res);
+ assert.eq(res.lastErrorObject.updatedExisting, false, res);
+ assert.neq(res.lastErrorObject.upserted, null, res);
+ assert.eq(db[collName].find({"_id": res.lastErrorObject.upserted}).itcount(), 1);
+
+ // Clean up, remove upserted document.
+ assert.commandWorked(db[collName].deleteOne({"_id": res.lastErrorObject.upserted}));
} else {
assert.eq(res.lastErrorObject.n, 0, res);
assert.eq(res.lastErrorObject.updatedExisting, false, res);
}
} else { /* Remove */
const numMatchedDocsBefore = db[collName].find(query).itcount();
-
- res = assert.commandWorked(db.runCommand({
+ const cmdObj = {
findAndModify: collName,
query: query,
remove: true,
- }));
+ };
+ if (doSort) {
+ cmdObj.sort = sortVal;
+ }
+
+ jsTestLog("findAndModifyDelete state running with the following parameters: \n" +
+ "query: " + tojson(query) + "\n" +
+ "numMatchedDocsBefore: " + numMatchedDocsBefore + "\n" +
+ "containsMatchedDocs: " + containsMatchedDocs);
+
+ res = assert.commandWorked(db.runCommand(cmdObj));
const numMatchedDocsAfter = db[collName].find(query).itcount();
@@ -317,11 +381,18 @@ var $config = extendWorkload($config, function($config, $super) {
assert.eq(numMatchedDocsAfter, numMatchedDocsBefore);
}
}
+
+ if (doSort) {
+ // Ensure the correct document was modified by comparing the sort field of the
+ // selected sortDoc with the returned document image.
+ assert.eq(sortDoc[this.secondaryDocField], res.value[this.secondaryDocField], res);
+ }
};
$config.states.updateOne = function updateOne(db, collName, connCache) {
jsTestLog("Running updateOne state");
this.generateAndRunRandomUpdateOp(db, collName);
+ jsTestLog("Finished updateOne state");
};
$config.states.deleteOne = function deleteOne(db, collName, connCache) {
@@ -332,6 +403,10 @@ var $config = extendWorkload($config, function($config, $super) {
const containsMatchedDocs = db[collName].findOne(query) != null;
const numMatchedDocsBefore = db[collName].find(query).itcount();
+ jsTestLog("deleteOne state running with query: " + tojson(query) + "\n" +
+ "containsMatchedDocs: " + containsMatchedDocs + "\n" +
+ "numMatchedDocsBefore: " + numMatchedDocsBefore);
+
let res = assert.commandWorked(db[collName].deleteOne(query));
const numMatchedDocsAfter = db[collName].find(query).itcount();
@@ -345,11 +420,13 @@ var $config = extendWorkload($config, function($config, $super) {
// The count should both be 0.
assert.eq(numMatchedDocsAfter, numMatchedDocsBefore);
}
+ jsTestLog("Finished deleteOne state");
};
$config.states.findAndModify = function findAndModify(db, collName, connCache) {
jsTestLog("Running findAndModify state");
this.generateAndRunRandomFindAndModifyOp(db, collName);
+ jsTestLog("Finished findAndModify state");
};
$config.setup = function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js
index cd1bed6b593ff..1ac1c5e96e474 100644
--- a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js
+++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js
@@ -1,21 +1,20 @@
-'use strict';
-
/**
* Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while
* there are concurrent chunk migrations.
*
* @tags: [
- * featureFlagUpdateOneWithoutShardKey,
- * requires_fcv_70,
+ * requires_fcv_71,
* requires_sharding,
* uses_transactions,
* ]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js');
-load('jstests/concurrency/fsm_workloads/write_without_shard_key_base.js');
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.startState = "init";
$config.transitions = {
init: {moveChunk: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2},
diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js
new file mode 100644
index 0000000000000..42106a22da3b1
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js
@@ -0,0 +1,148 @@
+/**
+ * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while
+ * concurrently refining the collection's shard key.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_sharding,
+ * uses_transactions,
+ * ]
+ */
+
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js";
+
+export const $config = extendWorkload($baseConfig, function($config, $super) {
+ $config.startState = "init";
+
+ // Use a CountDownLatch as if it were a std::atomic shared between all of the
+ // threads. The collection name is suffixed with the current this.latch.getCount() value
+ // when concurrent CRUD operations are run against it. With every refineCollectionShardKey,
+ // call this.latch.countDown() and run CRUD operations against the new collection suffixed
+ // with this.latch.getCount(). This bypasses the need to drop and reshard the current
+ // collection with every refineCollectionShardKey since it cannot be achieved in an atomic
+ // fashion under the FSM infrastructure (meaning CRUD operations would fail).
+ $config.data.latchCount = $config.iterations;
+ $config.data.latch = new CountDownLatch($config.data.latchCount);
+
+ $config.data.shardKey = {a: 1};
+ $config.data.defaultShardKeyField = 'a';
+ $config.data.defaultShardKey = {a: 1};
+
+ // The variables used by the random_moveChunk_base config in order to move chunks.
+ $config.data.newShardKey = {a: 1, b: 1};
+ $config.data.newShardKeyFields = ["a", "b"];
+
+ $config.setup = function setup(db, collName, cluster) {
+ // Proactively create and shard all possible collections suffixed with this.latch.getCount()
+ // that could receive CRUD operations over the course of the FSM workload. This prevents the
+ // race that could occur between sharding a collection and creating an index on the new
+ // shard key (if this step were done after every refineCollectionShardKey).
+ for (let i = this.latchCount; i >= 0; --i) {
+ const latchCollName = collName + '_' + i;
+ let coll = db.getCollection(latchCollName);
+ assertAlways.commandWorked(
+ db.adminCommand({shardCollection: coll.getFullName(), key: this.defaultShardKey}));
+ assertAlways.commandWorked(coll.createIndex(this.newShardKey));
+ $super.setup.apply(this, [db, latchCollName, cluster]);
+ }
+ };
+
+ // Occasionally flush the router's cached metadata to verify the metadata for the refined
+ // collections can be successfully loaded.
+ $config.states.flushRouterConfig = function flushRouterConfig(db, collName, connCache) {
+ jsTestLog("Running flushRouterConfig state");
+ assert.commandWorked(db.adminCommand({flushRouterConfig: db.getName()}));
+ };
+
+ $config.data.getCurrentLatchCollName = function(collName) {
+ return collName + '_' + this.latch.getCount().toString();
+ };
+
+ $config.states.refineCollectionShardKey = function refineCollectionShardKey(
+ db, collName, connCache) {
+ jsTestLog("Running refineCollectionShardKey state.");
+ const latchCollName = this.getCurrentLatchCollName(collName);
+
+ try {
+ const cmdObj = {
+ refineCollectionShardKey: db.getCollection(latchCollName).getFullName(),
+ key: this.newShardKey
+ };
+
+ assertAlways.commandWorked(db.adminCommand(cmdObj));
+ } catch (e) {
+ // There is a race that could occur where two threads run refineCollectionShardKey
+ // concurrently on the same collection. Since the epoch of the collection changes,
+ // the later thread may receive a StaleEpoch error, which is an acceptable error.
+ //
+ // It is also possible to receive a LockBusy error if refineCollectionShardKey is unable
+ // to acquire the distlock before timing out due to ongoing migrations acquiring the
+ // distlock first.
+ // TODO SERVER-68551: Remove the LockBusy error once the balancer no longer acquires the
+ // DDL lock for migrations.
+ if (e.code == ErrorCodes.StaleEpoch || e.code == ErrorCodes.LockBusy) {
+ print("Ignoring acceptable refineCollectionShardKey error: " + tojson(e));
+ return;
+ }
+ throw e;
+ }
+
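+ // Track the current shard key fields for this latched collection now that the refine
+ // has succeeded.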
+ this.shardKeyField[latchCollName] = this.newShardKeyFields;
+ this.latch.countDown();
+ };
+
+ $config.states.findAndModify = function findAndModify(db, collName, connCache) {
+ $super.states.findAndModify.apply(this,
+ [db, this.getCurrentLatchCollName(collName), connCache]);
+ };
+
+ $config.states.updateOne = function updateOne(db, collName, connCache) {
+ $super.states.updateOne.apply(this,
+ [db, this.getCurrentLatchCollName(collName), connCache]);
+ };
+
+ $config.states.deleteOne = function deleteOne(db, collName, connCache) {
+ $super.states.deleteOne.apply(this,
+ [db, this.getCurrentLatchCollName(collName), connCache]);
+ };
+
+ $config.transitions = {
+ init:
+ {refineCollectionShardKey: 0.25, updateOne: 0.25, deleteOne: 0.25, findAndModify: 0.25},
+ updateOne: {
+ refineCollectionShardKey: 0.2,
+ updateOne: 0.2,
+ deleteOne: 0.2,
+ findAndModify: 0.2,
+ flushRouterConfig: 0.2
+ },
+ deleteOne: {
+ refineCollectionShardKey: 0.2,
+ updateOne: 0.2,
+ deleteOne: 0.2,
+ findAndModify: 0.2,
+ flushRouterConfig: 0.2
+ },
+ findAndModify: {
+ refineCollectionShardKey: 0.2,
+ updateOne: 0.2,
+ deleteOne: 0.2,
+ findAndModify: 0.2,
+ flushRouterConfig: 0.2
+ },
+ refineCollectionShardKey: {
+ refineCollectionShardKey: 0.2,
+ updateOne: 0.2,
+ deleteOne: 0.2,
+ findAndModify: 0.2,
+ flushRouterConfig: 0.2
+ },
+ flushRouterConfig:
+ {refineCollectionShardKey: 0.25, updateOne: 0.25, deleteOne: 0.25, findAndModify: 0.25},
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js
new file mode 100644
index 0000000000000..fd23c2d4a4300
--- /dev/null
+++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js
@@ -0,0 +1,129 @@
+/**
+ * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while
+ * the collection reshards concurrently.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_sharding,
+ * uses_transactions,
+ * ]
+ */
+
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {
+ $config as $baseConfig
+} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js";
+
+export const $config = extendWorkload($baseConfig, function($config, $super) {
+ $config.startState = "init";
+
+ // reshardingMinimumOperationDurationMillis is set to 30 seconds when there are stepdowns.
+ // So in order to limit the overall time for the test, we limit the number of resharding
+ // operations to maxReshardingExecutions.
+ const maxReshardingExecutions = TestData.runningWithShardStepdowns ? 4 : $config.iterations;
+ const customShardKeyFieldName = "customShardKey";
+
+ $config.data.shardKeys = [];
+ $config.data.currentShardKeyIndex = -1;
+ $config.data.reshardingCount = 0;
+
+ $config.states.init = function init(db, collName, connCache) {
+ $super.states.init.apply(this, arguments);
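+ // The workload cycles between the default shard key and 'customShardKey' on successive
+ // reshardCollection operations.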
+ this.shardKeys.push({[this.defaultShardKeyField]: 1});
+ this.shardKeys.push({[customShardKeyFieldName]: 1});
+ this.currentShardKeyIndex = 0;
+ };
+
+ $config.data.generateRandomDocument = function generateRandomDocument(tid, partition) {
+ const doc = $super.data.generateRandomDocument.apply(this, arguments);
+ assert.neq(partition, null);
+ doc[customShardKeyFieldName] = this.generateRandomInt(partition.lower, partition.upper - 1);
+ return doc;
+ };
+
+ /**
+ * Returns a random boolean.
+ */
+ $config.data.generateRandomBool = function generateRandomBool() {
+ return Math.random() > 0.5;
+ };
+
+ $config.data.shouldSkipWriteResponseValidation = function shouldSkipWriteResponseValidation(
+ res) {
+ let shouldSkip = $super.data.shouldSkipWriteResponseValidation.apply(this, arguments);
+
+ // This workload does in-place resharding so a retry that is sent
+ // reshardingMinimumOperationDurationMillis after resharding completes is expected to fail
+ // with IncompleteTransactionHistory.
+ if (!shouldSkip && (res.code == ErrorCodes.IncompleteTransactionHistory)) {
+ return res.errmsg.includes("Incomplete history detected for transaction");
+ }
+
+ return shouldSkip;
+ };
+
+ $config.states.reshardCollection = function reshardCollection(db, collName, connCache) {
+ const collection = db.getCollection(collName);
+ const ns = collection.getFullName();
+ jsTestLog("Running reshardCollection state on: " + tojson(ns));
+
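+ // Only thread 0 issues reshardCollection, and only up to maxReshardingExecutions times,
+ // so at most one resharding runs at any given time.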
+ if (this.tid === 0 && (this.reshardingCount <= maxReshardingExecutions)) {
+ const newShardKeyIndex = (this.currentShardKeyIndex + 1) % this.shardKeys.length;
+ const newShardKey = this.shardKeys[newShardKeyIndex];
+ const reshardCollectionCmdObj = {
+ reshardCollection: ns,
+ key: newShardKey,
+ };
+
+ print(`Started resharding collection ${ns}: ${tojson({newShardKey})}`);
+ if (TestData.runningWithShardStepdowns) {
+ assert.soon(function() {
+ var res = db.adminCommand(reshardCollectionCmdObj);
+ if (res.ok) {
+ return true;
+ }
+ assert(res.hasOwnProperty("code"));
+
+ // Race to retry.
+ if (res.code === ErrorCodes.ReshardCollectionInProgress) {
+ return false;
+ }
+ // Unexpected error.
+ doassert(`Failed with unexpected ${tojson(res)}`);
+ }, "Reshard command failed", 10 * 1000);
+ } else {
+ assert.commandWorked(db.adminCommand(reshardCollectionCmdObj));
+ }
+ print(`Finished resharding collection ${ns}: ${tojson({newShardKey})}`);
+
+ // If resharding fails with SnapshotUnavailable, then this will be incorrect. But
+ // it's fine since reshardCollection will succeed if the new shard key matches the
+ // existing one.
+ this.currentShardKeyIndex = newShardKeyIndex;
+ this.reshardingCount += 1;
+
+ db.printShardingStatus();
+
+ connCache.mongos.forEach(mongos => {
+ if (this.generateRandomBool()) {
+ // Without explicitly refreshing mongoses, retries of retryable write statements
+ // would always be routed to the donor shards. Non-deterministically refreshing
+ // enables us to have test coverage for retrying against both the donor and
+ // recipient shards.
+ assert.commandWorked(mongos.adminCommand({flushRouterConfig: 1}));
+ }
+ });
+ }
+ };
+
+ $config.transitions = {
+ init: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2},
+ updateOne: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2},
+ deleteOne: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2},
+ findAndModify: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2},
+ reshardCollection:
+ {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 5202e5e4ee879..101bdc0aec1d1 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -1,12 +1,10 @@
-'use strict';
-
/**
* yield.js
*
* Designed to execute queries and make them yield as much as possible while also updating and
* removing documents that they operate on.
*/
-var $config = (function() {
+export const $config = (function() {
// The explain used to build the assertion message in advanceCursor() is the only command not
// allowed in a transaction used in the query state function. With shard stepdowns, getMores
// aren't allowed outside a transaction, so if the explain runs when the suite is configured to
diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
index 21d43a6d536da..2d8a432b333d3 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_and_hashed.js (extends yield_rooted_or.js)
*
* Intersperse queries which use the AND_HASH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Issue a query that will use the AND_HASH stage. This is a little tricky, so use
* stagedebug to force it to happen. Unfortunately this means it can't be batched.
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
index 480b9258d7863..465491153c335 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_and_sorted.js (extends yield_rooted_or.js)
*
* Intersperse queries which use the AND_SORTED stage with updates and deletes of documents they
* may match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
* stagedebug to force it to happen. Unfortunately this means it can't be batched.
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
index 7b5a000704235..b74c06b9cf7ca 100644
--- a/jstests/concurrency/fsm_workloads/yield_fetch.js
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_fetch.js (extends yield_rooted_or.js)
*
* Intersperse queries which use the FETCH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Issue a query that will use the FETCH stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
index f3016c5332e90..6a95d952e9c76 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -1,13 +1,11 @@
-'use strict';
-
/*
* Intersperses $geoNear aggregations with updates and deletes of documents they may match.
* @tags: [requires_non_retryable_writes]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.query = function geoNear(db, collName) {
// This distance gets about 80 docs around the origin. There is one doc inserted
// every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 7d3af9d2c2477..78d84660ecb75 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -1,13 +1,11 @@
-'use strict';
-
/*
* Intersperses $geoNear aggregations with updates of non-geo fields to test deduplication.
* @tags: [requires_non_retryable_writes]
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_geo_near.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
$config.states.remove = function remove(db, collName) {
var id = Random.randInt(this.nDocs);
var doc = db[collName].findOne({_id: id});
diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js
index a0ba2ffcf1606..310d0cb62ec87 100644
--- a/jstests/concurrency/fsm_workloads/yield_id_hack.js
+++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_id_hack.js (extends yield.js)
*
* Intersperse queries which use the ID_HACK stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Issue a query that will use the ID_HACK stage. This cannot be
* batched, so issue a
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
index 7b5cd4b3cddb5..09bc1fd4b2c97 100644
--- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -1,5 +1,3 @@
-'use strict';
-
/*
* yield_rooted_or.js (extends yield.js)
*
@@ -7,10 +5,10 @@
* match.
* Other workloads that need an index on c and d can inherit from this.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Issue a query with an or stage as the root.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
index af8fef20510fb..fba02611c2337 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_sort.js (extends yield_sort_merge.js)
*
* Intersperse queries which use the SORT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_sort_merge.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Execute a query that will use the SORT stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
index dd57841d75e49..b17a16d1307c0 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
@@ -1,5 +1,3 @@
-'use strict';
-
/*
* yield_sort_merge.js (extends yield_fetch.js)
*
@@ -8,10 +6,10 @@
*
* Other workloads that need an index { a: 1, b: 1 } can extend this.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Execute a query that will use the SORT_MERGE stage.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js
index 2bd4a5608ffe8..e129a0222d136 100644
--- a/jstests/concurrency/fsm_workloads/yield_text.js
+++ b/jstests/concurrency/fsm_workloads/yield_text.js
@@ -1,15 +1,13 @@
-'use strict';
-
/*
* yield_text.js (extends yield.js)
*
* Intersperse queries which use the TEXT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
+import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js";
-var $config = extendWorkload($config, function($config, $super) {
+export const $config = extendWorkload($baseConfig, function($config, $super) {
/*
* Pick a random word and search for it using full text search.
*/
diff --git a/jstests/concurrency/fsm_workloads/yield_with_drop.js b/jstests/concurrency/fsm_workloads/yield_with_drop.js
index c2172f8c4869b..da84e737b7f74 100644
--- a/jstests/concurrency/fsm_workloads/yield_with_drop.js
+++ b/jstests/concurrency/fsm_workloads/yield_with_drop.js
@@ -4,7 +4,7 @@
* Executes query operations that can yield while the source collection is dropped and recreated.
*/
-var $config = (function() {
+export const $config = (function() {
const data = {
kAllowedErrors: [
ErrorCodes.ConflictingOperationInProgress,
diff --git a/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js b/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js
index b16ddcd319b7c..fc023262fc0d4 100644
--- a/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js
+++ b/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js
@@ -8,7 +8,7 @@
"use strict";
-var $config = (function() {
+export const $config = (function() {
// The 'setup' function is run once by the parent thread after the cluster has been initialized,
// before the worker threads have been spawned. The 'this' argument is bound as '$config.data'.
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js b/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js
index 75ffc15938953..2e67203928e31 100644
--- a/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js
+++ b/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js
@@ -5,7 +5,7 @@
*
* Runs multiple aggregations with $_externalDataSources option concurrently.
*/
-var $config = (function() {
+export const $config = (function() {
var data = (() => {
Random.setRandomSeed();
diff --git a/jstests/core/administrative/auth1.js b/jstests/core/administrative/auth1.js
index 7633644c0da0d..4b41c7e21bb91 100644
--- a/jstests/core/administrative/auth1.js
+++ b/jstests/core/administrative/auth1.js
@@ -15,7 +15,7 @@
var mydb = db.getSiblingDB('auth1_db');
mydb.dropAllUsers();
-pass = "a" + Math.random();
+let pass = "a" + Math.random();
// print( "password [" + pass + "]" );
mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles});
@@ -23,7 +23,7 @@ mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles});
assert(mydb.auth("eliot", pass), "auth failed");
assert(!mydb.auth("eliot", pass + "a"), "auth should have failed");
-pass2 = "b" + Math.random();
+let pass2 = "b" + Math.random();
mydb.changeUserPassword("eliot", pass2);
assert(!mydb.auth("eliot", pass), "failed to change password failed");
diff --git a/jstests/core/administrative/auth2.js b/jstests/core/administrative/auth2.js
index eb2b47f696f95..4851ac1057503 100644
--- a/jstests/core/administrative/auth2.js
+++ b/jstests/core/administrative/auth2.js
@@ -8,7 +8,7 @@
// SERVER-724
db.runCommand({logout: 1});
-x = db.runCommand({logout: 1});
+let x = db.runCommand({logout: 1});
assert.eq(1, x.ok, "A");
x = db.logout();
diff --git a/jstests/core/administrative/check_shard_index.js b/jstests/core/administrative/check_shard_index.js
index f6bb9f3ee7a4d..bf12d6e4417c6 100644
--- a/jstests/core/administrative/check_shard_index.js
+++ b/jstests/core/administrative/check_shard_index.js
@@ -11,7 +11,7 @@
// CHECKSHARDINGINDEX TEST UTILS
// -------------------------
-f = db.jstests_shardingindex;
+let f = db.jstests_shardingindex;
f.drop();
// -------------------------
@@ -22,7 +22,8 @@ f.drop();
f.createIndex({x: 1, y: 1});
assert.eq(0, f.count(), "1. initial count should be zero");
-res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+let res =
+ db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
assert.eq(true, res.ok, "1a");
f.save({x: 1, y: 1});
diff --git a/jstests/core/administrative/current_op/currentop_cursors.js b/jstests/core/administrative/current_op/currentop_cursors.js
index 2d3608b21abb3..483d2d3eb62ba 100644
--- a/jstests/core/administrative/current_op/currentop_cursors.js
+++ b/jstests/core/administrative/current_op/currentop_cursors.js
@@ -10,8 +10,6 @@
* # This test contains assertions for the hostname that operations run on.
* tenant_migration_incompatible,
* docker_incompatible,
- * # TODO SERVER-70142: Populate planSummary field which is shown in output of $currentOp.
- * cqf_incompatible,
* ]
*/
diff --git a/jstests/core/administrative/current_op/currentop_waiting_for_latch.js b/jstests/core/administrative/current_op/currentop_waiting_for_latch.js
index b02f219fe02ed..d744050fc9c00 100644
--- a/jstests/core/administrative/current_op/currentop_waiting_for_latch.js
+++ b/jstests/core/administrative/current_op/currentop_waiting_for_latch.js
@@ -7,7 +7,9 @@
* not_allowed_with_security_token,
* assumes_read_concern_unchanged,
* assumes_read_preference_unchanged,
- * no_selinux
+ * no_selinux,
+ * multiversion_incompatible,
+ * requires_latch_analyzer
* ]
*/
(function() {
diff --git a/jstests/core/administrative/getlog1.js b/jstests/core/administrative/getlog1.js
index a5989e87c75db..07c623573ef75 100644
--- a/jstests/core/administrative/getlog1.js
+++ b/jstests/core/administrative/getlog1.js
@@ -5,7 +5,7 @@
// to run:
// ./mongo jstests/
-contains = function(arr, obj) {
+let contains = function(arr, obj) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
diff --git a/jstests/core/administrative/list_all_local_sessions.js b/jstests/core/administrative/list_all_local_sessions.js
index 434e8ed660c2a..5bd896301a923 100644
--- a/jstests/core/administrative/list_all_local_sessions.js
+++ b/jstests/core/administrative/list_all_local_sessions.js
@@ -5,6 +5,9 @@
// # former operation must be routed to the primary in a replica set, whereas the latter may be
// # routed to a secondary.
// assumes_read_preference_unchanged,
+// # The config fuzzer may run logical session cache refreshes in the background, which interferes
+// # with this test.
+// does_not_support_config_fuzzer,
// # Sessions are asynchronously flushed to disk, so a stepdown immediately after calling
// # startSession may cause this test to fail to find the returned sessionId.
// does_not_support_stepdowns,
diff --git a/jstests/core/administrative/list_all_sessions.js b/jstests/core/administrative/list_all_sessions.js
index 7ecaefd00f9f6..a6b23c8340d3c 100644
--- a/jstests/core/administrative/list_all_sessions.js
+++ b/jstests/core/administrative/list_all_sessions.js
@@ -1,6 +1,9 @@
// Sessions are asynchronously flushed to disk, so a stepdown immediately after calling
// startSession may cause this test to fail to find the returned sessionId.
// @tags: [
+// # The config fuzzer may run logical session cache refreshes in the background, which interferes
+// # with this test.
+// does_not_support_config_fuzzer,
// does_not_support_stepdowns,
// uses_testing_only_commands,
// no_selinux,
diff --git a/jstests/core/administrative/list_sessions.js b/jstests/core/administrative/list_sessions.js
index 18f684ef11ded..316962e9b3ee0 100644
--- a/jstests/core/administrative/list_sessions.js
+++ b/jstests/core/administrative/list_sessions.js
@@ -6,6 +6,9 @@
// does_not_support_stepdowns,
// uses_testing_only_commands,
// no_selinux,
+// # The config fuzzer may run logical session cache refreshes in the background, which interferes
+// # with this test.
+// does_not_support_config_fuzzer,
// ]
// Basic tests for the $listSessions aggregation stage.
diff --git a/jstests/core/administrative/profile/profile3.js b/jstests/core/administrative/profile/profile3.js
index 68334e72b084f..1ab42d622168e 100644
--- a/jstests/core/administrative/profile/profile3.js
+++ b/jstests/core/administrative/profile/profile3.js
@@ -14,10 +14,10 @@ var stddb = db;
var db = db.getSiblingDB("profile3");
db.dropAllUsers();
-t = db.profile3;
+let t = db.profile3;
t.drop();
-profileCursor = function(query) {
+let profileCursor = function(query) {
print("----");
query = query || {};
Object.extend(query, {user: username + "@" + db.getName()});
@@ -25,7 +25,7 @@ profileCursor = function(query) {
};
try {
- username = "jstests_profile3_user";
+ var username = "jstests_profile3_user";
db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
db.logout();
diff --git a/jstests/core/administrative/profile/profile_agg.js b/jstests/core/administrative/profile/profile_agg.js
index 1f273d47c006b..6e673a3fe43b8 100644
--- a/jstests/core/administrative/profile/profile_agg.js
+++ b/jstests/core/administrative/profile/profile_agg.js
@@ -4,6 +4,7 @@
// does_not_support_stepdowns,
// requires_fcv_70,
// requires_profiling,
+// references_foreign_collection,
// ]
// Confirms that profiled aggregation execution contains all expected metrics with proper values.
diff --git a/jstests/core/administrative/profile/profile_getmore.js b/jstests/core/administrative/profile/profile_getmore.js
index 26040af1934f2..cc682dde58a7b 100644
--- a/jstests/core/administrative/profile/profile_getmore.js
+++ b/jstests/core/administrative/profile/profile_getmore.js
@@ -5,7 +5,8 @@
// requires_getmore,
// requires_fcv_70,
// requires_profiling,
-// cqf_incompatible,
+// # TODO: SERVER-70142 populate planSummary.
+// cqf_experimental_incompatible,
// ]
// Confirms that profiled getMore execution contains all expected metrics with proper values.
diff --git a/jstests/core/administrative/profile/profile_query_hash.js b/jstests/core/administrative/profile/profile_query_hash.js
index d8ddaf06f36fd..735942250f370 100644
--- a/jstests/core/administrative/profile/profile_query_hash.js
+++ b/jstests/core/administrative/profile/profile_query_hash.js
@@ -8,7 +8,7 @@
// does_not_support_stepdowns,
// requires_profiling,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// ]
(function() {
"use strict";
diff --git a/jstests/core/administrative/set_param1.js b/jstests/core/administrative/set_param1.js
index dfe71da85d22a..35eaa68ca447f 100644
--- a/jstests/core/administrative/set_param1.js
+++ b/jstests/core/administrative/set_param1.js
@@ -9,6 +9,7 @@
// # migration hook.
// tenant_migration_incompatible,
// ]
+load("jstests/libs/fixture_helpers.js");
// Tests for accessing logLevel server parameter using getParameter/setParameter commands
// and shell helpers.
@@ -165,8 +166,7 @@ assert(!result,
assert.commandWorked(
db.adminCommand({"setParameter": 1, redactEncryptedFields: old.redactEncryptedFields}));
-const isMongos = (db.hello().msg === 'isdbgrid');
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
//
// oplogFetcherSteadyStateMaxFetcherRestarts
//
diff --git a/jstests/core/api/api_version_create.js b/jstests/core/api/api_version_create.js
index e3a4ed00f6671..f42dfd2c1b59d 100644
--- a/jstests/core/api/api_version_create.js
+++ b/jstests/core/api/api_version_create.js
@@ -36,4 +36,4 @@ assert.commandFailedWithCode(testDB.runCommand({
apiStrict: true,
}),
ErrorCodes.InvalidOptions);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/api/api_version_new_50_language_features.js b/jstests/core/api/api_version_new_50_language_features.js
index 14c4f7e6aa84b..e3a8255e29e12 100644
--- a/jstests/core/api/api_version_new_50_language_features.js
+++ b/jstests/core/api/api_version_new_50_language_features.js
@@ -11,9 +11,10 @@
"use strict";
load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'.
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_new_50_language_features";
const viewName = collName + "_view";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
assert.commandWorked(coll.insert({a: 1, date: new ISODate()}));
@@ -35,7 +36,7 @@ for (let pipeline of stablePipelines) {
APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName);
// Assert error is not thrown when running without apiStrict=true.
- assert.commandWorked(db.runCommand({
+ assert.commandWorked(testDb.runCommand({
aggregate: coll.getName(),
pipeline: pipeline,
apiVersion: "1",
@@ -62,12 +63,12 @@ APIVersionHelpers.assertViewSucceedsWithAPIStrict(setWindowFieldsPipeline, viewN
// Creating a collection with dotted paths is allowed with apiStrict:true.
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
create: 'new_50_features_validator',
validator: {$expr: {$eq: [{$getField: {input: "$$ROOT", field: "dotted.path"}}, 2]}},
apiVersion: "1",
apiStrict: true
}));
-assert.commandWorked(db.runCommand({drop: 'new_50_features_validator'}));
+assert.commandWorked(testDb.runCommand({drop: 'new_50_features_validator'}));
})();
diff --git a/jstests/core/api/api_version_new_51_language_features.js b/jstests/core/api/api_version_new_51_language_features.js
index 48e73d7e052b0..6e4f7280f9748 100644
--- a/jstests/core/api/api_version_new_51_language_features.js
+++ b/jstests/core/api/api_version_new_51_language_features.js
@@ -11,9 +11,10 @@
"use strict";
load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'.
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_new_51_language_features";
const viewName = collName + "_view";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
assert.commandWorked(coll.insert({a: 1, date: new ISODate()}));
@@ -30,7 +31,7 @@ for (let pipeline of stablePipelines) {
APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName);
// Assert error is not thrown when running without apiStrict=true.
- assert.commandWorked(db.runCommand({
+ assert.commandWorked(testDb.runCommand({
aggregate: coll.getName(),
pipeline: pipeline,
apiVersion: "1",
diff --git a/jstests/core/api/api_version_new_52_language_features.js b/jstests/core/api/api_version_new_52_language_features.js
index b1ab2107fbf96..e2073d5ed2453 100644
--- a/jstests/core/api/api_version_new_52_language_features.js
+++ b/jstests/core/api/api_version_new_52_language_features.js
@@ -13,9 +13,10 @@
"use strict";
load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'.
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_new_52_language_features";
const viewName = collName + "_view";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
assert.commandWorked(coll.insert({a: 1, arr: [2, 1, 4]}));
@@ -89,7 +90,7 @@ for (const pipeline of stablePipelines) {
APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName);
// Assert error is not thrown when running without apiStrict=true.
- assert.commandWorked(db.runCommand({
+ assert.commandWorked(testDb.runCommand({
aggregate: coll.getName(),
pipeline: pipeline,
apiVersion: "1",
diff --git a/jstests/core/api/api_version_parameters.js b/jstests/core/api/api_version_parameters.js
index cb397395d7ff7..2d41aadf5f674 100644
--- a/jstests/core/api/api_version_parameters.js
+++ b/jstests/core/api/api_version_parameters.js
@@ -12,74 +12,77 @@
(function() {
"use strict";
+const testDb = db.getSiblingDB(jsTestName());
+
// Test parsing logic on command included in API V1.
// If the client passed apiStrict, they must also pass apiVersion.
-assert.commandFailedWithCode(db.runCommand({ping: 1, apiStrict: true}),
+assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiStrict: true}),
4886600,
"Provided apiStrict without passing apiVersion");
// If the client passed apiDeprecationErrors, they must also pass apiVersion.
-assert.commandFailedWithCode(db.runCommand({ping: 1, apiDeprecationErrors: false}),
+assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiDeprecationErrors: false}),
4886600,
"Provided apiDeprecationErrors without passing apiVersion");
// If the client passed apiVersion, it must be of type string.
-assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: 1}),
+assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: 1}),
ErrorCodes.TypeMismatch,
"apiVersion' is the wrong type 'double', expected type 'string'");
// If the client passed apiVersion, its value must be "1".
-assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: "2"}),
+assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: "2"}),
ErrorCodes.APIVersionError,
"API version must be \"1\"");
// If the client passed apiStrict, it must be of type boolean.
-assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: "1", apiStrict: "true"}),
+assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: "true"}),
ErrorCodes.TypeMismatch,
"apiStrict' is the wrong type 'string', expected type 'boolean'");
// If the client passed apiDeprecationErrors, it must be of type boolean.
assert.commandFailedWithCode(
- db.runCommand({ping: 1, apiVersion: "1", apiDeprecationErrors: "false"}),
+ testDb.runCommand({ping: 1, apiVersion: "1", apiDeprecationErrors: "false"}),
ErrorCodes.TypeMismatch,
"apiDeprecationErrors' is the wrong type 'string', expected type 'boolean'");
// Sanity check that command works with proper parameters.
assert.commandWorked(
- db.runCommand({ping: 1, apiVersion: "1", apiStrict: true, apiDeprecationErrors: true}));
+ testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: true, apiDeprecationErrors: true}));
assert.commandWorked(
- db.runCommand({ping: 1, apiVersion: "1", apiStrict: false, apiDeprecationErrors: false}));
-assert.commandWorked(db.runCommand({ping: 1, apiVersion: "1"}));
+ testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: false, apiDeprecationErrors: false}));
+assert.commandWorked(testDb.runCommand({ping: 1, apiVersion: "1"}));
// Test parsing logic on command not included in API V1.
-assert.commandWorked(db.runCommand({listCommands: 1, apiVersion: "1"}));
+assert.commandWorked(testDb.runCommand({listCommands: 1, apiVersion: "1"}));
// If the client passed apiStrict: true, but the command is not in V1, reply with
// APIStrictError.
-assert.commandFailedWithCode(db.runCommand({listCommands: 1, apiVersion: "1", apiStrict: true}),
+assert.commandFailedWithCode(testDb.runCommand({listCommands: 1, apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
-assert.commandFailedWithCode(db.runCommand({isMaster: 1, apiVersion: "1", apiStrict: true}),
+assert.commandFailedWithCode(testDb.runCommand({isMaster: 1, apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
-assert.commandWorked(db.runCommand({listCommands: 1, apiVersion: "1", apiDeprecationErrors: true}));
+assert.commandWorked(
+ testDb.runCommand({listCommands: 1, apiVersion: "1", apiDeprecationErrors: true}));
// Test parsing logic of command deprecated in API V1.
-assert.commandWorked(db.runCommand({testDeprecation: 1, apiVersion: "1"}));
-assert.commandWorked(db.runCommand({testDeprecation: 1, apiVersion: "1", apiStrict: true}));
+assert.commandWorked(testDb.runCommand({testDeprecation: 1, apiVersion: "1"}));
+assert.commandWorked(testDb.runCommand({testDeprecation: 1, apiVersion: "1", apiStrict: true}));
// If the client passed apiDeprecationErrors: true, but the command is
// deprecated in API Version 1, reply with APIDeprecationError.
assert.commandFailedWithCode(
- db.runCommand({testDeprecation: 1, apiVersion: "1", apiDeprecationErrors: true}),
+ testDb.runCommand({testDeprecation: 1, apiVersion: "1", apiDeprecationErrors: true}),
ErrorCodes.APIDeprecationError,
"Provided apiDeprecationErrors: true, but the invoked command's deprecatedApiVersions() does not include \"1\"");
// Assert APIStrictError message for unsupported commands contains link to docs site
var err = assert.commandFailedWithCode(
- db.runCommand({buildInfo: 1, apiStrict: true, apiVersion: "1"}), ErrorCodes.APIStrictError);
+ testDb.runCommand({buildInfo: 1, apiStrict: true, apiVersion: "1"}), ErrorCodes.APIStrictError);
assert.includes(err.errmsg, 'buildInfo');
assert.includes(err.errmsg, 'dochub.mongodb.org');
// Test writing to system.js fails.
assert.commandFailedWithCode(
- db.runCommand({
+ testDb.runCommand({
insert: "system.js",
documents: [{
_id: "shouldntExist",
@@ -93,7 +96,7 @@ assert.commandFailedWithCode(
ErrorCodes.APIStrictError,
"Provided apiStrict:true, but the command insert attempts to write to system.js");
assert.commandFailedWithCode(
- db.runCommand({
+ testDb.runCommand({
update: "system.js",
updates: [{
q: {
@@ -115,7 +118,7 @@ assert.commandFailedWithCode(
ErrorCodes.APIStrictError,
"Provided apiStrict:true, but the command update attempts to write to system.js");
assert.commandFailedWithCode(
- db.runCommand({
+ testDb.runCommand({
delete: "system.js",
deletes: [{
q: {
@@ -132,7 +135,7 @@ assert.commandFailedWithCode(
ErrorCodes.APIStrictError,
"Provided apiStrict:true, but the command delete attempts to write to system.js");
assert.commandFailedWithCode(
- db.runCommand({
+ testDb.runCommand({
findAndModify: "system.js",
query: {
_id: "shouldExist",
@@ -147,6 +150,6 @@ assert.commandFailedWithCode(
ErrorCodes.APIStrictError,
"Provided apiStrict:true, but the command findAndModify attempts to write to system.js");
// Test reading from system.js succeeds.
-assert.commandWorked(db.runCommand(
+assert.commandWorked(testDb.runCommand(
{find: "system.js", filter: {_id: "shouldExist"}, apiVersion: "1", apiStrict: true}));
})();
diff --git a/jstests/core/api/api_version_pipeline_stages.js b/jstests/core/api/api_version_pipeline_stages.js
index c9772e4fa0c01..f86274d783060 100644
--- a/jstests/core/api/api_version_pipeline_stages.js
+++ b/jstests/core/api/api_version_pipeline_stages.js
@@ -13,8 +13,9 @@
(function() {
"use strict";
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_pipeline_stages";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
coll.insert({a: 1});
@@ -32,7 +33,7 @@ const unstablePipelines = [
];
function assertAggregateFailsWithAPIStrict(pipeline) {
- assert.commandFailedWithCode(db.runCommand({
+ assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: pipeline,
cursor: {},
@@ -47,7 +48,7 @@ for (let pipeline of unstablePipelines) {
assertAggregateFailsWithAPIStrict(pipeline);
// Assert error thrown when creating a view on a pipeline with stages not in API Version 1.
- assert.commandFailedWithCode(db.runCommand({
+ assert.commandFailedWithCode(testDb.runCommand({
create: 'api_version_pipeline_stages_should_fail',
viewOn: collName,
pipeline: pipeline,
@@ -67,14 +68,14 @@ assertAggregateFailsWithAPIStrict([{$collStats: {latencyStats: {}, queryExecStat
assertAggregateFailsWithAPIStrict(
[{$collStats: {latencyStats: {}, storageStats: {scale: 1024}, queryExecStats: {}}}]);
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$collStats: {}}],
cursor: {},
apiVersion: "1",
apiStrict: true
}));
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$collStats: {count: {}}}],
cursor: {},
@@ -86,7 +87,7 @@ assert.commandWorked(db.runCommand({
// compute the count, we get back a single result in the first batch - no getMore is required.
// This test is meant to mimic a drivers test and serve as a warning if we may be making a breaking
// change for the drivers.
-const cmdResult = assert.commandWorked(db.runCommand({
+const cmdResult = assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$collStats: {count: {}}}, {$group: {_id: 1, count: {$sum: "$count"}}}],
cursor: {},
diff --git a/jstests/core/api/api_version_test_expression.js b/jstests/core/api/api_version_test_expression.js
index 41bbd9c040270..23edac27c4db2 100644
--- a/jstests/core/api/api_version_test_expression.js
+++ b/jstests/core/api/api_version_test_expression.js
@@ -8,17 +8,19 @@
* assumes_unsharded_collection,
* uses_api_parameters,
* no_selinux,
+ * references_foreign_collection,
* ]
*/
(function() {
"use strict";
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_test_expression";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
const collForeignName = collName + "_foreign";
-const collForeign = db[collForeignName];
+const collForeign = testDb[collForeignName];
collForeign.drop();
for (let i = 0; i < 5; i++) {
@@ -30,14 +32,14 @@ for (let i = 0; i < 5; i++) {
// true}.
let pipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}];
assert.commandFailedWithCode(
- db.runCommand(
+ testDb.runCommand(
{aggregate: collName, pipeline: pipeline, cursor: {}, apiStrict: true, apiVersion: "1"}),
ErrorCodes.APIStrictError);
// Assert error thrown when command specifies {apiDeprecationErrors: true} and expression specifies
// {deprecated: true}
pipeline = [{$project: {v: {$_testApiVersion: {deprecated: true}}}}];
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: pipeline,
cursor: {},
@@ -49,7 +51,7 @@ assert.commandFailedWithCode(db.runCommand({
// Assert error thrown when the command specifies apiStrict:true and an inner pipeline contains an
// unstable expression.
const unstableInnerPipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}];
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: unstableInnerPipeline}}],
cursor: {},
@@ -57,7 +59,7 @@ assert.commandFailedWithCode(db.runCommand({
apiVersion: "1"
}),
ErrorCodes.APIStrictError);
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: [{$unionWith: {coll: collForeignName, pipeline: unstableInnerPipeline}}],
cursor: {},
@@ -68,14 +70,14 @@ assert.commandFailedWithCode(db.runCommand({
// Assert command worked when the command specifies apiStrict:false and an inner pipeline contains
// an unstable expression.
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: unstableInnerPipeline}}],
cursor: {},
apiStrict: false,
apiVersion: "1"
}));
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$unionWith: {coll: collForeignName, pipeline: unstableInnerPipeline}}],
cursor: {},
@@ -86,7 +88,7 @@ assert.commandWorked(db.runCommand({
// Assert error thrown when the command specifies apiDeprecationErrors:true and an inner pipeline
// contains a deprecated expression.
const deprecatedInnerPipeline = [{$project: {v: {$_testApiVersion: {deprecated: true}}}}];
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: deprecatedInnerPipeline}}],
cursor: {},
@@ -94,7 +96,7 @@ assert.commandFailedWithCode(db.runCommand({
apiVersion: "1"
}),
ErrorCodes.APIDeprecationError);
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
aggregate: collName,
pipeline: [{$unionWith: {coll: collForeignName, pipeline: deprecatedInnerPipeline}}],
cursor: {},
@@ -105,14 +107,14 @@ assert.commandFailedWithCode(db.runCommand({
// Assert command worked when the command specifies apiDeprecationErrors:false and an inner pipeline
// contains a deprecated expression.
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: deprecatedInnerPipeline}}],
cursor: {},
apiDeprecationErrors: false,
apiVersion: "1"
}));
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
aggregate: collName,
pipeline: [{$unionWith: {coll: collForeignName, pipeline: deprecatedInnerPipeline}}],
cursor: {},
@@ -122,24 +124,24 @@ assert.commandWorked(db.runCommand({
// Test that command successfully runs to completion without any API parameters.
pipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}];
-assert.commandWorked(db.runCommand({aggregate: collName, pipeline: pipeline, cursor: {}}));
+assert.commandWorked(testDb.runCommand({aggregate: collName, pipeline: pipeline, cursor: {}}));
// Create a view with {apiStrict: true}.
-db.view.drop();
-assert.commandWorked(db.runCommand(
+testDb.view.drop();
+assert.commandWorked(testDb.runCommand(
{create: "view", viewOn: collName, pipeline: [], apiStrict: true, apiVersion: "1"}));
// find() on views should work normally if 'apiStrict' is true.
-assert.commandWorked(db.runCommand({find: "view", apiStrict: true, apiVersion: "1"}));
+assert.commandWorked(testDb.runCommand({find: "view", apiStrict: true, apiVersion: "1"}));
// This command will work because API parameters are not inherited from views.
-assert.commandWorked(db.runCommand({aggregate: "view", pipeline: pipeline, cursor: {}}));
+assert.commandWorked(testDb.runCommand({aggregate: "view", pipeline: pipeline, cursor: {}}));
assert.commandFailedWithCode(
- db.runCommand(
+ testDb.runCommand(
{aggregate: "view", pipeline: pipeline, cursor: {}, apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
// Create a view with 'unstable' parameter should fail with 'apiStrict'.
-db.unstableView.drop();
-assert.commandFailedWithCode(db.runCommand({
+testDb.unstableView.drop();
+assert.commandFailedWithCode(testDb.runCommand({
create: "unstableView",
viewOn: collName,
pipeline: pipeline,
@@ -149,18 +151,18 @@ assert.commandFailedWithCode(db.runCommand({
ErrorCodes.APIStrictError);
// Create a view with 'unstable' should be allowed without 'apiStrict'.
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
create: "unstableView",
viewOn: collName,
pipeline: pipeline,
apiVersion: "1",
apiStrict: false
}));
-assert.commandWorked(db.runCommand({aggregate: "unstableView", pipeline: [], cursor: {}}));
+assert.commandWorked(testDb.runCommand({aggregate: "unstableView", pipeline: [], cursor: {}}));
// This command will fail even with the empty pipeline because of the view.
assert.commandFailedWithCode(
- db.runCommand(
+ testDb.runCommand(
{aggregate: "unstableView", pipeline: [], cursor: {}, apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
@@ -169,33 +171,33 @@ let validator = {$expr: {$_testApiVersion: {unstable: true}}};
let validatedCollName = collName + "_validated";
// Creating a collection with the unstable validator is not allowed with apiStrict:true.
-db[validatedCollName].drop();
+testDb[validatedCollName].drop();
assert.commandFailedWithCode(
- db.runCommand(
+ testDb.runCommand(
{create: validatedCollName, validator: validator, apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
// Run create and insert commands without apiStrict:true and verify that it is successful.
-assert.commandWorked(db.runCommand(
+assert.commandWorked(testDb.runCommand(
{create: validatedCollName, validator: validator, apiVersion: "1", apiStrict: false}));
assert.commandWorked(
- db[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]}));
+ testDb[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]}));
// Specifying apiStrict: true results in an error.
assert.commandFailedWithCode(
- db[validatedCollName].runCommand(
+ testDb[validatedCollName].runCommand(
{insert: validatedCollName, documents: [{num: 1}], apiVersion: "1", apiStrict: true}),
ErrorCodes.APIStrictError);
// Recreate the validator containing a deprecated test expression.
-db[validatedCollName].drop();
+testDb[validatedCollName].drop();
validator = {
$expr: {$_testApiVersion: {deprecated: true}}
};
// Creating a collection with the deprecated validator is not allowed with
// apiDeprecationErrors:true.
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
create: validatedCollName,
validator: validator,
apiVersion: "1",
@@ -205,17 +207,17 @@ assert.commandFailedWithCode(db.runCommand({
// Run create and insert commands without apiDeprecationErrors:true and verify that it is
// successful.
-assert.commandWorked(db.runCommand({
+assert.commandWorked(testDb.runCommand({
create: validatedCollName,
validator: validator,
apiVersion: "1",
apiDeprecationErrors: false,
}));
assert.commandWorked(
- db[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]}));
+ testDb[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]}));
// Specifying apiDeprecationErrors: true results in an error.
-assert.commandFailedWithCode(db[validatedCollName].runCommand({
+assert.commandFailedWithCode(testDb[validatedCollName].runCommand({
insert: validatedCollName,
documents: [{num: 1}],
apiVersion: "1",
@@ -226,12 +228,13 @@ assert.commandFailedWithCode(db[validatedCollName].runCommand({
// Test that API version parameters are inherited into the inner command of the explain command.
function checkExplainInnerCommandGetsAPIVersionParameters(explainedCmd, errCode) {
assert.commandFailedWithCode(
- db.runCommand(
+ testDb.runCommand(
{explain: explainedCmd, apiVersion: "1", apiDeprecationErrors: true, apiStrict: true}),
errCode);
// If 'apiStrict: false' the inner aggregate command will execute successfully.
- const explainRes = db.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false});
+ const explainRes =
+ testDb.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false});
assert(explainRes.hasOwnProperty('executionStats'), explainRes);
assert.eq(explainRes['executionStats']['executionSuccess'], true, explainRes);
}
@@ -257,6 +260,6 @@ findCmd = {
};
checkExplainInnerCommandGetsAPIVersionParameters(findCmd, ErrorCodes.APIDeprecationError);
-db[validatedCollName].drop();
-db.unstableView.drop();
+testDb[validatedCollName].drop();
+testDb.unstableView.drop();
})();
diff --git a/jstests/core/api/api_version_unstable_fields.js b/jstests/core/api/api_version_unstable_fields.js
index c34189c759df9..768665c3e476e 100644
--- a/jstests/core/api/api_version_unstable_fields.js
+++ b/jstests/core/api/api_version_unstable_fields.js
@@ -12,8 +12,9 @@
(function() {
"use strict";
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_version_unstable_fields";
-assert.commandWorked(db[collName].insert({a: 1}));
+assert.commandWorked(testDb[collName].insert({a: 1}));
const unstableFieldsForAggregate = {
isMapReduceCommand: false,
@@ -44,8 +45,9 @@ function testCommandWithUnstableFields(command, containsUnstableFields) {
const cmd = JSON.parse(JSON.stringify(command));
const cmdWithUnstableField = Object.assign(cmd, {[field]: containsUnstableFields[field]});
- assert.commandFailedWithCode(
- db.runCommand(cmdWithUnstableField), ErrorCodes.APIStrictError, cmdWithUnstableField);
+ assert.commandFailedWithCode(testDb.runCommand(cmdWithUnstableField),
+ ErrorCodes.APIStrictError,
+ cmdWithUnstableField);
}
}
@@ -73,16 +75,16 @@ let createIndexesCmd = {
apiStrict: true,
};
assert.commandFailedWithCode(
- db.runCommand(createIndexesCmd), ErrorCodes.APIStrictError, createIndexesCmd);
+ testDb.runCommand(createIndexesCmd), ErrorCodes.APIStrictError, createIndexesCmd);
createIndexesCmd["indexes"] = [{key: {a: "geoHaystack"}, name: "a_1"}];
assert.commandFailedWithCode(
- db.runCommand(createIndexesCmd), ErrorCodes.CannotCreateIndex, createIndexesCmd);
+ testDb.runCommand(createIndexesCmd), ErrorCodes.CannotCreateIndex, createIndexesCmd);
// Test that collMod command with an unstable field ('prepareUnique') in an inner struct throws when
// 'apiStrict' is set to true.
assert.commandWorked(
- db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
+ testDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
let collModCommand = {collMod: "col", apiVersion: "1", apiStrict: true};
testCommandWithUnstableFields(collModCommand, {index: {name: "a_1", prepareUnique: true}});
}());
diff --git a/jstests/core/api/api_version_unstable_indexes.js b/jstests/core/api/api_version_unstable_indexes.js
index cbd612654349c..5064e6edc0671 100644
--- a/jstests/core/api/api_version_unstable_indexes.js
+++ b/jstests/core/api/api_version_unstable_indexes.js
@@ -14,15 +14,13 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'getWinningPlan'.
-load("jstests/libs/fixture_helpers.js"); // For 'isMongos'.
-load("jstests/libs/columnstore_util.js"); // For 'setUpServerForColumnStoreIndexTest'.
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For 'isMongos'.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
+const testDb = db.getSiblingDB(jsTestName());
const collName = "api_verision_unstable_indexes";
-const coll = db[collName];
+const coll = testDb[collName];
coll.drop();
assert.commandWorked(coll.insert([
@@ -36,14 +34,14 @@ assert.commandWorked(coll.createIndex({subject: "text"}));
assert.commandWorked(coll.createIndex({"views": 1}, {sparse: true}));
// The "text" index, "subject_text", can be used normally.
-if (!FixtureHelpers.isMongos(db)) {
+if (!FixtureHelpers.isMongos(testDb)) {
const explainRes = assert.commandWorked(
- db.runCommand({explain: {"find": collName, "filter": {$text: {$search: "coffee"}}}}));
+ testDb.runCommand({explain: {"find": collName, "filter": {$text: {$search: "coffee"}}}}));
assert.eq(getWinningPlan(explainRes.queryPlanner).indexName, "subject_text", explainRes);
}
// No "text" index can be used for $text search as the "text" index is excluded from API version 1.
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
explain: {"find": collName, "filter": {$text: {$search: "coffee"}}},
apiVersion: "1",
apiStrict: true
@@ -51,7 +49,7 @@ assert.commandFailedWithCode(db.runCommand({
ErrorCodes.NoQueryExecutionPlans);
// Can not hint a sparse index which is excluded from API version 1 with 'apiStrict: true'.
-assert.commandFailedWithCode(db.runCommand({
+assert.commandFailedWithCode(testDb.runCommand({
"find": collName,
"filter": {views: 50},
"hint": {views: 1},
@@ -60,15 +58,15 @@ assert.commandFailedWithCode(db.runCommand({
}),
ErrorCodes.BadValue);
-if (!FixtureHelpers.isMongos(db)) {
- const explainRes = assert.commandWorked(
- db.runCommand({explain: {"find": collName, "filter": {views: 50}, "hint": {views: 1}}}));
+if (!FixtureHelpers.isMongos(testDb)) {
+ const explainRes = assert.commandWorked(testDb.runCommand(
+ {explain: {"find": collName, "filter": {views: 50}, "hint": {views: 1}}}));
assert.eq(getWinningPlan(explainRes.queryPlanner).inputStage.indexName, "views_1", explainRes);
}
-if (setUpServerForColumnStoreIndexTest(db)) {
+if (setUpServerForColumnStoreIndexTest(testDb)) {
// Column store indexes cannot be created with apiStrict: true.
- assert.commandFailedWithCode(db.runCommand({
+ assert.commandFailedWithCode(testDb.runCommand({
createIndexes: coll.getName(),
indexes: [{key: {"$**": "columnstore"}, name: "$**_columnstore"}],
apiVersion: "1",
@@ -82,14 +80,14 @@ if (setUpServerForColumnStoreIndexTest(db)) {
const projection = {_id: 0, x: 1};
// Sanity check that this query can use column scan.
- assert(planHasStage(db, coll.find({}, projection).explain(), "COLUMN_SCAN"));
+ assert(planHasStage(testDb, coll.find({}, projection).explain(), "COLUMN_SCAN"));
// No hint should work (but redirect to coll scan).
- assert.commandWorked(db.runCommand(
+ assert.commandWorked(testDb.runCommand(
{find: coll.getName(), projection: {_id: 0, x: 1}, apiVersion: "1", apiStrict: true}));
// Hint should fail.
- assert.commandFailedWithCode(db.runCommand({
+ assert.commandFailedWithCode(testDb.runCommand({
find: coll.getName(),
projection: projection,
hint: {"$**": "columnstore"},
@@ -98,4 +96,3 @@ if (setUpServerForColumnStoreIndexTest(db)) {
}),
ErrorCodes.BadValue);
}
-})();
diff --git a/jstests/core/api/apitest_db.js b/jstests/core/api/apitest_db.js
index 805e2c8f99051..a2f293400884a 100644
--- a/jstests/core/api/apitest_db.js
+++ b/jstests/core/api/apitest_db.js
@@ -8,7 +8,7 @@
assert("test" == db, "wrong database currently not test");
-dd = function(x) {
+let dd = function(x) {
// print( x );
};
diff --git a/jstests/core/capped/capped.js b/jstests/core/capped/capped.js
index 6c62ba755606d..1a26f5a1e22a8 100644
--- a/jstests/core/capped/capped.js
+++ b/jstests/core/capped/capped.js
@@ -9,7 +9,7 @@
db.jstests_capped.drop();
db.createCollection("jstests_capped", {capped: true, size: 30000});
-t = db.jstests_capped;
+let t = db.jstests_capped;
assert.eq(1, t.getIndexes().length, "expected a count of one index for new capped collection");
t.save({x: 1});
diff --git a/jstests/core/capped/capped1.js b/jstests/core/capped/capped1.js
index df9b12ddf8989..5a02fb6c55f26 100644
--- a/jstests/core/capped/capped1.js
+++ b/jstests/core/capped/capped1.js
@@ -6,11 +6,11 @@
* ]
*/
-t = db.capped1;
+let t = db.capped1;
t.drop();
db.createCollection("capped1", {capped: true, size: 1024});
-v = t.validate();
+let v = t.validate();
assert(v.valid, "A : " + tojson(v)); // SERVER-485
t.save({x: 1});
diff --git a/jstests/core/capped/capped5.js b/jstests/core/capped/capped5.js
index a276baf043d33..d2b704c6cb97b 100644
--- a/jstests/core/capped/capped5.js
+++ b/jstests/core/capped/capped5.js
@@ -6,9 +6,9 @@
* ]
*/
-tn = "capped5";
+let tn = "capped5";
-t = db[tn];
+let t = db[tn];
t.drop();
db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1});
diff --git a/jstests/core/capped/capped_empty.js b/jstests/core/capped/capped_empty.js
index e0515967ca84d..020be638c61b2 100644
--- a/jstests/core/capped/capped_empty.js
+++ b/jstests/core/capped/capped_empty.js
@@ -15,7 +15,7 @@
* ]
*/
-t = db.capped_empty;
+let t = db.capped_empty;
t.drop();
db.createCollection(t.getName(), {capped: true, size: 100});
diff --git a/jstests/core/capped/capped_resize.js b/jstests/core/capped/capped_resize.js
index e1ebac19ac1c5..17125589817ce 100644
--- a/jstests/core/capped/capped_resize.js
+++ b/jstests/core/capped/capped_resize.js
@@ -10,9 +10,6 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-load("jstests/libs/feature_flag_util.js");
-
const testDB = db.getSiblingDB(jsTestName());
const cappedColl = testDB["capped_coll"];
@@ -106,12 +103,9 @@ let verifyLimitUpdate = function(updates) {
// We modify the collection to have a size multiple of 256, then
// we modify the collection to have a size non multiple of 256 and finally
// we modify the collection to have a size multiple of 256
- // TODO SERVER-74653: Remove feature flag check.
- if (FeatureFlagUtil.isPresentAndEnabled(testDB, "CappedCollectionsRelaxedSize")) {
- verifyLimitUpdate({cappedSize: 25 * 1024});
- verifyLimitUpdate({cappedSize: 50 * 1023});
- verifyLimitUpdate({cappedSize: 50 * 1024});
- }
+ verifyLimitUpdate({cappedSize: 25 * 1024});
+ verifyLimitUpdate({cappedSize: 50 * 1023});
+ verifyLimitUpdate({cappedSize: 50 * 1024});
})();
(function updateMaxLimit() {
@@ -169,5 +163,4 @@ let verifyLimitUpdate = function(updates) {
stats = assert.commandWorked(cappedColl.stats());
assert.eq(stats.count, initialDocSize);
assert.lte(stats.size, maxSize);
-})();
-}());
+})();
\ No newline at end of file
diff --git a/jstests/core/capped/cappeda.js b/jstests/core/capped/cappeda.js
index 3ec0074eefe43..816f5be4e0d3a 100644
--- a/jstests/core/capped/cappeda.js
+++ b/jstests/core/capped/cappeda.js
@@ -6,13 +6,13 @@
* ]
*/
-t = db.scan_capped_id;
+let t = db.scan_capped_id;
t.drop();
-x = t.runCommand("create", {capped: true, size: 10000});
+let x = t.runCommand("create", {capped: true, size: 10000});
assert(x.ok);
-for (i = 0; i < 100; i++)
+for (let i = 0; i < 100; i++)
t.insert({_id: i, x: 1});
function q() {
diff --git a/jstests/core/clustered/clustered_collection_bounded_scan.js b/jstests/core/clustered/clustered_collection_bounded_scan.js
index 8a6a0a1a9d595..90abe0ba8bf25 100644
--- a/jstests/core/clustered/clustered_collection_bounded_scan.js
+++ b/jstests/core/clustered/clustered_collection_bounded_scan.js
@@ -8,15 +8,13 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js");
+import {
+ testClusteredCollectionBoundedScan
+} from "jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js";
const replicatedDB = db.getSiblingDB(jsTestName());
const collName = "coll";
const replicatedColl = replicatedDB[collName];
-testClusteredCollectionBoundedScan(replicatedColl, {_id: 1});
-})();
+testClusteredCollectionBoundedScan(replicatedColl, {_id: 1});
diff --git a/jstests/core/clustered/clustered_collection_collation.js b/jstests/core/clustered/clustered_collection_collation.js
index 92c032c3b80e0..f09c35c150e80 100644
--- a/jstests/core/clustered/clustered_collection_collation.js
+++ b/jstests/core/clustered/clustered_collection_collation.js
@@ -10,12 +10,12 @@
* ]
*/
-(function() {
-"use strict";
-
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_collection_hint_common.js");
+import {
+ validateClusteredCollectionHint
+} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js";
const collatedName = 'clustered_collection_with_collation';
const collated = db[collatedName];
@@ -121,8 +121,9 @@ const verifyHasBoundsAndFindsN = function(coll, expected, predicate, queryCollat
const res = queryCollation === undefined
? assert.commandWorked(coll.find(predicate).explain())
: assert.commandWorked(coll.find(predicate).collation(queryCollation).explain());
- const min = assert(res.queryPlanner.winningPlan.minRecord, "No min bound");
- const max = assert(res.queryPlanner.winningPlan.maxRecord, "No max bound");
+ const queryPlan = getWinningPlan(res.queryPlanner);
+ const min = assert(queryPlan.minRecord, "No min bound");
+ const max = assert(queryPlan.maxRecord, "No max bound");
assert.eq(min, max, "COLLSCAN bounds are not equal");
assert.eq(expected, coll.find(predicate).count(), "Didn't find the expected records");
};
@@ -131,8 +132,9 @@ const verifyNoBoundsAndFindsN = function(coll, expected, predicate, queryCollati
const res = queryCollation === undefined
? assert.commandWorked(coll.find(predicate).explain())
: assert.commandWorked(coll.find(predicate).collation(queryCollation).explain());
- assert.eq(null, res.queryPlanner.winningPlan.minRecord, "There's a min bound");
- assert.eq(null, res.queryPlanner.winningPlan.maxRecord, "There's a max bound");
+ const queryPlan = getWinningPlan(res.queryPlanner);
+ assert.eq(null, queryPlan.minRecord, "There's a min bound");
+ assert.eq(null, queryPlan.maxRecord, "There's a max bound");
assert.eq(expected, coll.find(predicate).count(), "Didn't find the expected records");
};
@@ -140,8 +142,9 @@ const verifyNoTightBoundsAndFindsN = function(coll, expected, predicate, queryCo
const res = queryCollation === undefined
? assert.commandWorked(coll.find(predicate).explain())
: assert.commandWorked(coll.find(predicate).collation(queryCollation).explain());
- const min = res.queryPlanner.winningPlan.minRecord;
- const max = res.queryPlanner.winningPlan.maxRecord;
+ const queryPlan = getWinningPlan(res.queryPlanner);
+ const min = queryPlan.minRecord;
+ const max = queryPlan.maxRecord;
assert.neq(null, min, "No min bound");
assert.neq(null, max, "No max bound");
assert(min !== max, "COLLSCAN bounds are equal");
@@ -330,4 +333,3 @@ validateClusteredCollectionHint(noncollated, {
expectedWinningPlanStats:
{stage: "CLUSTERED_IXSCAN", direction: "forward", minRecord: 5, maxRecord: 11}
});
-})();
diff --git a/jstests/core/clustered/clustered_collection_hint.js b/jstests/core/clustered/clustered_collection_hint.js
index ba7820b24635a..b1f24fc39f9c6 100644
--- a/jstests/core/clustered/clustered_collection_hint.js
+++ b/jstests/core/clustered/clustered_collection_hint.js
@@ -8,14 +8,13 @@
* requires_non_retryable_writes,
* ]
*/
-(function() {
-"use strict";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_collection_hint_common.js");
+import {
+ testClusteredCollectionHint
+} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js";
const replicatedDB = db.getSiblingDB(jsTestName());
const collName = "coll";
const replicatedColl = replicatedDB[collName];
testClusteredCollectionHint(replicatedColl, {_id: 1}, "_id_");
-})();
diff --git a/jstests/core/collation.js b/jstests/core/collation.js
index df72d6bd6177a..5b9b1b9130b64 100644
--- a/jstests/core/collation.js
+++ b/jstests/core/collation.js
@@ -18,10 +18,14 @@
// ]
// Integration tests for the collation feature.
-(function() {
-'use strict';
+import {
+ getPlanStage,
+ getWinningPlan,
+ isCollscan,
+ isIxscan,
+ planHasStage
+} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js");
load("jstests/libs/index_catalog_helpers.js");
// For isWiredTiger.
load("jstests/concurrency/fsm_workload_helpers/server_types.js");
@@ -1860,5 +1864,4 @@ assert.throws(() => coll.find({}, {_id: 0})
res = testDb.runCommand({create: 'view', viewOn: 'coll'});
assert(res.ok == 1 || res.errmsg == ErrorCodes.NamespaceExists);
res = testDb.runCommand({create: 'view', viewOn: 'coll', collation: {locale: 'en'}});
-assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
-})();
+assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
diff --git a/jstests/core/columnstore/column_scan_skip_row_store_projection.js b/jstests/core/columnstore/column_scan_skip_row_store_projection.js
index 8cfee05c075a7..eedb619e7d5f4 100644
--- a/jstests/core/columnstore/column_scan_skip_row_store_projection.js
+++ b/jstests/core/columnstore/column_scan_skip_row_store_projection.js
@@ -21,24 +21,22 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-// For areAllCollectionsClustered.
-load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+load(
+ "jstests/libs/clustered_collections/clustered_collection_util.js"); // For
+ // areAllCollectionsClustered.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index test since the feature flag is not enabled.");
- return;
+ quit();
}
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const indexedColl = db.column_scan_skip_row_store_projection_indexed;
@@ -254,4 +252,3 @@ function runAllAggregations() {
setupCollections();
runAllAggregations();
-}());
diff --git a/jstests/core/columnstore/column_store_index_compression.js b/jstests/core/columnstore/column_store_index_compression.js
index f59a78d5696c7..a022797ae1260 100644
--- a/jstests/core/columnstore/column_store_index_compression.js
+++ b/jstests/core/columnstore/column_store_index_compression.js
@@ -17,24 +17,21 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
load("jstests/libs/discover_topology.js"); // For findNonConfigNodes
load("jstests/libs/fixture_helpers.js"); // For isMongos
load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index test since the feature flag is not enabled.");
- return;
+ quit();
}
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.column_store_index_compression;
@@ -178,4 +175,3 @@ for (let {node, indexDetails} of reader.statsForEachMongod(coll, zstdIndex)) {
"zstd",
{node, indexDetails});
}
-}());
diff --git a/jstests/core/columnstore/column_store_projection.js b/jstests/core/columnstore/column_store_projection.js
index 65a3f78388ffd..48dbe03da264d 100644
--- a/jstests/core/columnstore/column_store_projection.js
+++ b/jstests/core/columnstore/column_store_projection.js
@@ -15,16 +15,13 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/analyze_plan.js"); // For "planHasStage."
+import {planHasStage} from "jstests/libs/analyze_plan.js";
load("jstests/aggregation/extras/utils.js"); // For "resultsEq."
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
//
@@ -209,5 +206,4 @@ runTestWithDocsAndIndexes("sibling_paths_5",
{keys: {"a.$**": "columnstore"}});
// Note that this test does not drop any of its test collections or indexes, so that they will be
-// available to follow-on index validation tests.
-})();
+// available to follow-on index validation tests.
diff --git a/jstests/core/columnstore/columnstore_eligibility.js b/jstests/core/columnstore/columnstore_eligibility.js
index 0ae6e9b0e01a1..02679fa844383 100644
--- a/jstests/core/columnstore/columnstore_eligibility.js
+++ b/jstests/core/columnstore/columnstore_eligibility.js
@@ -17,15 +17,12 @@
* requires_fcv_70,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
+import {aggPlanHasStage, planHasStage} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos.
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.columnstore_eligibility;
@@ -254,4 +251,3 @@ assert.commandFailedWithCode(db.runCommand({
hint: {"a.$**": "columnstore"}
}),
6714002);
-}());
diff --git a/jstests/core/columnstore/columnstore_index.js b/jstests/core/columnstore/columnstore_index.js
index 895f93ae93f93..e261b974b96bc 100644
--- a/jstests/core/columnstore/columnstore_index.js
+++ b/jstests/core/columnstore/columnstore_index.js
@@ -21,16 +21,14 @@
* tenant_migration_incompatible,
* does_not_support_stepdowns,
* not_allowed_with_security_token,
+ * uses_full_validation,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {planHasStage} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.columnstore_index;
@@ -127,4 +125,3 @@ assert(
{v: 2, key: {"$**": "columnstore"}, name: "$**_columnstore", columnstoreProjection: {x: 1}},
listIndexesResult),
listIndexesResult);
-}());
diff --git a/jstests/core/columnstore/columnstore_index_correctness.js b/jstests/core/columnstore/columnstore_index_correctness.js
index f6fc1621fb3af..b3362fb324fd5 100644
--- a/jstests/core/columnstore/columnstore_index_correctness.js
+++ b/jstests/core/columnstore/columnstore_index_correctness.js
@@ -12,18 +12,16 @@
* tenant_migration_incompatible,
* does_not_support_stepdowns,
* not_allowed_with_security_token,
+ * uses_full_validation,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/analyze_plan.js"); // For "planHasStage."
+import {getPlanStages, aggPlanHasStage, planHasStage} from "jstests/libs/analyze_plan.js";
load("jstests/aggregation/extras/utils.js"); // For "resultsEq."
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.columnstore_index_correctness;
@@ -180,102 +178,102 @@ const coll = db.columnstore_index_correctness;
// Multiple tests in this file use the same dataset. Intentionally not using _id as the unique
// identifier, to avoid getting IDHACK plans when we query by it.
const docs = [
- {num: 0},
- {num: 1, a: null},
- {num: 2, a: "scalar"},
- {num: 3, a: {}},
- {num: 4, a: {x: 1, b: "scalar"}},
- {num: 5, a: {b: {}}},
- {num: 6, a: {x: 1, b: {}}},
- {num: 7, a: {x: 1, b: {x: 1}}},
- {num: 8, a: {b: {c: "scalar"}}},
- {num: 9, a: {b: {c: null}}},
- {num: 10, a: {b: {c: [[1, 2], [{}], 2]}}},
- {num: 11, a: {x: 1, b: {x: 1, c: ["scalar"]}}},
- {num: 12, a: {x: 1, b: {c: {x: 1}}}},
- {num: 13, a: {b: []}},
- {num: 14, a: {b: [null]}},
- {num: 15, a: {b: ["scalar"]}},
- {num: 16, a: {b: [[]]}},
- {num: 17, a: {b: [1, {}, 2]}},
- {num: 18, a: {b: [[1, 2], [{}], 2]}},
- {num: 19, a: {x: 1, b: [[1, 2], [{}], 2]}},
- {num: 20, a: {b: [{c: "scalar"}]}},
- {num: 21, a: {b: [{c: "scalar"}, {c: "scalar2"}]}},
- {num: 22, a: {b: [{c: [[1, 2], [{}], 2]}]}},
- {num: 23, a: {b: [1, {c: "scalar"}, 2]}},
- {num: 24, a: {b: [1, {c: [[1, 2], [{}], 2]}, 2]}},
- {num: 25, a: {x: 1, b: [1, {c: [[1, 2], [{}], 2]}, 2]}},
- {num: 26, a: {b: [[1, 2], [{c: "scalar"}], 2]}},
- {num: 27, a: {b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}},
- {num: 28, a: {x: 1, b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}},
- {num: 29, a: []},
- {num: 30, a: [null]},
- {num: 31, a: ["scalar"]},
- {num: 32, a: [[]]},
- {num: 33, a: [{}]},
- {num: 34, a: [1, {}, 2]},
- {num: 35, a: [[1, 2], [{}], 2]},
- {num: 36, a: [{b: "scalar"}]},
- {num: 37, a: [{b: null}]},
- {num: 38, a: [1, {b: "scalar"}, 2]},
- {num: 39, a: [1, {b: []}, 2]},
- {num: 40, a: [1, {b: [null]}, 2]},
- {num: 41, a: [1, {b: ["scalar"]}, 2]},
- {num: 42, a: [1, {b: [[]]}, 2]},
- {num: 43, a: [{b: []}]},
- {num: 44, a: [{b: ["scalar"]}]},
- {num: 45, a: [{b: [[]]}]},
- {num: 46, a: [{b: {}}]},
- {num: 47, a: [{b: {c: "scalar"}}]},
- {num: 48, a: [{b: {c: [[1, 2], [{}], 2]}}]},
- {num: 49, a: [{b: {x: 1}}]},
- {num: 50, a: [{b: {x: 1, c: "scalar"}}]},
- {num: 51, a: [{b: [{c: "scalar"}]}]},
- {num: 52, a: [{b: [{c: ["scalar"]}]}]},
- {num: 53, a: [{b: [1, {c: ["scalar"]}, 2]}]},
- {num: 54, a: [{b: [{}]}]},
- {num: 55, a: [{b: [[1, 2], [{}], 2]}]},
- {num: 56, a: [{b: [[1, 2], [{c: "scalar"}], 2]}]},
- {num: 57, a: [{b: [[1, 2], [{c: ["scalar"]}], 2]}]},
- {num: 58, a: [1, {b: {}}, 2]},
- {num: 59, a: [1, {b: {c: "scalar"}}, 2]},
- {num: 60, a: [1, {b: {c: {x: 1}}}, 2]},
- {num: 61, a: [1, {b: {c: [1, {}, 2]}}, 2]},
- {num: 62, a: [1, {b: {x: 1}}, 2]},
- {num: 63, a: [1, {b: {x: 1, c: "scalar"}}, 2]},
- {num: 64, a: [1, {b: {x: 1, c: [[]]}}, 2]},
- {num: 65, a: [1, {b: {x: 1, c: [1, {}, 2]}}, 2]},
- {num: 66, a: [1, {b: [{}]}, 2]},
- {num: 67, a: [1, {b: [{c: "scalar"}]}, 2]},
- {num: 68, a: [1, {b: [{c: {x: 1}}]}, 2]},
- {num: 69, a: [1, {b: [{c: [1, {}, 2]}]}, 2]},
- {num: 70, a: [1, {b: [1, {}, 2]}, 2]},
- {num: 71, a: [1, {b: [1, {c: null}, 2]}, 2]},
- {num: 72, a: [1, {b: [1, {c: "scalar"}, 2]}, 2]},
- {num: 73, a: [1, {b: [1, {c: [1, {}, 2]}, 2]}, 2]},
- {num: 74, a: [1, {b: [[1, 2], [{}], 2]}, 2]},
- {num: 75, a: [1, {b: [[1, 2], [{c: "scalar"}], 2]}, 2]},
- {num: 76, a: [1, {b: [[1, 2], [{c: [1, {}, 2]}], 2]}, 2]},
- {num: 77, a: [[1, 2], [{b: "scalar"}], 2]},
- {num: 78, a: [[1, 2], [{b: {x: 1, c: "scalar"}}], 2]},
- {num: 79, a: [[1, 2], [{b: {x: 1, c: [1, {}, 2]}}], 2]},
- {num: 80, a: [[1, 2], [{b: []}], 2]},
- {num: 81, a: [[1, 2], [{b: [1, {c: "scalar"}, 2]}], 2]},
- {num: 82, a: [[1, 2], [{b: [[1, 2], [{c: "scalar"}], 2]}], 2]},
- {num: 83, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]},
- {num: 84, a: [{b: [{c: 1}, {}]}]},
- {num: 85, a: [{b: [{c: 1}, {d: 1}]}]},
- {num: 86, a: [{b: {c: 1}}, {b: {}}]},
- {num: 87, a: [{b: {c: 1}}, {b: {d: 1}}]},
- {num: 88, a: [{b: {c: 1}}, {}]},
- {num: 89, a: [{b: {c: 1}}, {b: null}]},
- {num: 90, a: [{b: {c: 1}}, {b: []}]},
- {num: 91, a: [{b: []}, {b: []}]},
- {num: 92, a: {b: [{c: [1, 2]}]}},
- {num: 93, a: {b: {c: [1, 2]}}},
- {num: 94, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]},
- {num: 95, a: [{m: 1, n: 2}, {m: 2, o: 1}]},
+ {_num: 0},
+ {_num: 1, a: null},
+ {_num: 2, a: "scalar"},
+ {_num: 3, a: {}},
+ {_num: 4, a: {x: 1, b: "scalar"}},
+ {_num: 5, a: {b: {}}},
+ {_num: 6, a: {x: 1, b: {}}},
+ {_num: 7, a: {x: 1, b: {x: 1}}},
+ {_num: 8, a: {b: {c: "scalar"}}},
+ {_num: 9, a: {b: {c: null}}},
+ {_num: 10, a: {b: {c: [[1, 2], [{}], 2]}}},
+ {_num: 11, a: {x: 1, b: {x: 1, c: ["scalar"]}}},
+ {_num: 12, a: {x: 1, b: {c: {x: 1}}}},
+ {_num: 13, a: {b: []}},
+ {_num: 14, a: {b: [null]}},
+ {_num: 15, a: {b: ["scalar"]}},
+ {_num: 16, a: {b: [[]]}},
+ {_num: 17, a: {b: [1, {}, 2]}},
+ {_num: 18, a: {b: [[1, 2], [{}], 2]}},
+ {_num: 19, a: {x: 1, b: [[1, 2], [{}], 2]}},
+ {_num: 20, a: {b: [{c: "scalar"}]}},
+ {_num: 21, a: {b: [{c: "scalar"}, {c: "scalar2"}]}},
+ {_num: 22, a: {b: [{c: [[1, 2], [{}], 2]}]}},
+ {_num: 23, a: {b: [1, {c: "scalar"}, 2]}},
+ {_num: 24, a: {b: [1, {c: [[1, 2], [{}], 2]}, 2]}},
+ {_num: 25, a: {x: 1, b: [1, {c: [[1, 2], [{}], 2]}, 2]}},
+ {_num: 26, a: {b: [[1, 2], [{c: "scalar"}], 2]}},
+ {_num: 27, a: {b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}},
+ {_num: 28, a: {x: 1, b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}},
+ {_num: 29, a: []},
+ {_num: 30, a: [null]},
+ {_num: 31, a: ["scalar"]},
+ {_num: 32, a: [[]]},
+ {_num: 33, a: [{}]},
+ {_num: 34, a: [1, {}, 2]},
+ {_num: 35, a: [[1, 2], [{}], 2]},
+ {_num: 36, a: [{b: "scalar"}]},
+ {_num: 37, a: [{b: null}]},
+ {_num: 38, a: [1, {b: "scalar"}, 2]},
+ {_num: 39, a: [1, {b: []}, 2]},
+ {_num: 40, a: [1, {b: [null]}, 2]},
+ {_num: 41, a: [1, {b: ["scalar"]}, 2]},
+ {_num: 42, a: [1, {b: [[]]}, 2]},
+ {_num: 43, a: [{b: []}]},
+ {_num: 44, a: [{b: ["scalar"]}]},
+ {_num: 45, a: [{b: [[]]}]},
+ {_num: 46, a: [{b: {}}]},
+ {_num: 47, a: [{b: {c: "scalar"}}]},
+ {_num: 48, a: [{b: {c: [[1, 2], [{}], 2]}}]},
+ {_num: 49, a: [{b: {x: 1}}]},
+ {_num: 50, a: [{b: {x: 1, c: "scalar"}}]},
+ {_num: 51, a: [{b: [{c: "scalar"}]}]},
+ {_num: 52, a: [{b: [{c: ["scalar"]}]}]},
+ {_num: 53, a: [{b: [1, {c: ["scalar"]}, 2]}]},
+ {_num: 54, a: [{b: [{}]}]},
+ {_num: 55, a: [{b: [[1, 2], [{}], 2]}]},
+ {_num: 56, a: [{b: [[1, 2], [{c: "scalar"}], 2]}]},
+ {_num: 57, a: [{b: [[1, 2], [{c: ["scalar"]}], 2]}]},
+ {_num: 58, a: [1, {b: {}}, 2]},
+ {_num: 59, a: [1, {b: {c: "scalar"}}, 2]},
+ {_num: 60, a: [1, {b: {c: {x: 1}}}, 2]},
+ {_num: 61, a: [1, {b: {c: [1, {}, 2]}}, 2]},
+ {_num: 62, a: [1, {b: {x: 1}}, 2]},
+ {_num: 63, a: [1, {b: {x: 1, c: "scalar"}}, 2]},
+ {_num: 64, a: [1, {b: {x: 1, c: [[]]}}, 2]},
+ {_num: 65, a: [1, {b: {x: 1, c: [1, {}, 2]}}, 2]},
+ {_num: 66, a: [1, {b: [{}]}, 2]},
+ {_num: 67, a: [1, {b: [{c: "scalar"}]}, 2]},
+ {_num: 68, a: [1, {b: [{c: {x: 1}}]}, 2]},
+ {_num: 69, a: [1, {b: [{c: [1, {}, 2]}]}, 2]},
+ {_num: 70, a: [1, {b: [1, {}, 2]}, 2]},
+ {_num: 71, a: [1, {b: [1, {c: null}, 2]}, 2]},
+ {_num: 72, a: [1, {b: [1, {c: "scalar"}, 2]}, 2]},
+ {_num: 73, a: [1, {b: [1, {c: [1, {}, 2]}, 2]}, 2]},
+ {_num: 74, a: [1, {b: [[1, 2], [{}], 2]}, 2]},
+ {_num: 75, a: [1, {b: [[1, 2], [{c: "scalar"}], 2]}, 2]},
+ {_num: 76, a: [1, {b: [[1, 2], [{c: [1, {}, 2]}], 2]}, 2]},
+ {_num: 77, a: [[1, 2], [{b: "scalar"}], 2]},
+ {_num: 78, a: [[1, 2], [{b: {x: 1, c: "scalar"}}], 2]},
+ {_num: 79, a: [[1, 2], [{b: {x: 1, c: [1, {}, 2]}}], 2]},
+ {_num: 80, a: [[1, 2], [{b: []}], 2]},
+ {_num: 81, a: [[1, 2], [{b: [1, {c: "scalar"}, 2]}], 2]},
+ {_num: 82, a: [[1, 2], [{b: [[1, 2], [{c: "scalar"}], 2]}], 2]},
+ {_num: 83, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]},
+ {_num: 84, a: [{b: [{c: 1}, {}]}]},
+ {_num: 85, a: [{b: [{c: 1}, {d: 1}]}]},
+ {_num: 86, a: [{b: {c: 1}}, {b: {}}]},
+ {_num: 87, a: [{b: {c: 1}}, {b: {d: 1}}]},
+ {_num: 88, a: [{b: {c: 1}}, {}]},
+ {_num: 89, a: [{b: {c: 1}}, {b: null}]},
+ {_num: 90, a: [{b: {c: 1}}, {b: []}]},
+ {_num: 91, a: [{b: []}, {b: []}]},
+ {_num: 92, a: {b: [{c: [1, 2]}]}},
+ {_num: 93, a: {b: {c: [1, 2]}}},
+ {_num: 94, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]},
+ {_num: 95, a: [{m: 1, n: 2}, {m: 2, o: 1}]},
];
coll.drop();
@@ -283,7 +281,7 @@ let bulk = coll.initializeUnorderedBulkOp();
for (let doc of docs) {
let insertObj = {};
Object.assign(insertObj, doc);
- if (doc.num % 2 == 0) {
+ if (doc._num % 2 == 0) {
insertObj.optionalField = "foo";
}
bulk.insert(insertObj);
@@ -293,7 +291,7 @@ bulk.execute();
assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
(function testProjectionOfIndependentPaths() {
- const kProjection = {_id: 0, "a.b.c": 1, num: 1, optionalField: 1};
+ const kProjection = {_id: 0, _num: 1, "a.b.c": 1, optionalField: 1};
let explain = coll.find({}, kProjection).explain();
assert(planHasStage(db, explain, "COLUMN_SCAN"),
@@ -303,15 +301,16 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
assert.eq(results.length, docs.length, "With no filter should have returned all docs");
for (let res of results) {
- const trueResult = coll.find({num: res.num}, kProjection).hint({$natural: 1}).toArray()[0];
- const originalDoc = coll.findOne({num: res.num});
+ const trueResult =
+ coll.find({_num: res._num}, kProjection).hint({$natural: 1}).toArray()[0];
+ const originalDoc = coll.findOne({_num: res._num});
assert.docEq(res, trueResult, "Mismatched projection of " + tojson(originalDoc));
}
})();
// Run a similar query that projects multiple fields with a shared parent object.
(function testProjectionOfSiblingPaths() {
- const kSiblingProjection = {_id: 0, "a.m": 1, "a.n": 1, num: 1};
+ const kSiblingProjection = {_id: 0, _num: 1, "a.m": 1, "a.n": 1};
let explain = coll.find({}, kSiblingProjection).explain();
assert(planHasStage(db, explain, "COLUMN_SCAN"),
@@ -322,15 +321,15 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
for (let res of results) {
const trueResult =
- coll.find({num: res.num}, kSiblingProjection).hint({$natural: 1}).toArray()[0];
- const originalDoc = coll.findOne({num: res.num});
+ coll.find({_num: res._num}, kSiblingProjection).hint({$natural: 1}).toArray()[0];
+ const originalDoc = coll.findOne({_num: res._num});
assert.eq(res, trueResult, "Mismatched projection of " + tojson(originalDoc));
}
})();
// Run a query that tests the SERVER-67742 fix.
(function testPrefixPath() {
- const kPrefixProjection = {_id: 0, "a": 1, num: 1};
+ const kPrefixProjection = {_id: 0, _num: 1, "a": 1};
// Have to use the index hint because SERVER-67264 blocks selection of CSI.
let explain = coll.find({"a.m": 1}, kPrefixProjection).hint({"$**": "columnstore"}).explain();
@@ -346,8 +345,8 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
for (let res of results) {
const trueResult =
- coll.find({num: res.num}, kPrefixProjection).hint({$natural: 1}).toArray()[0];
- const originalDoc = coll.findOne({num: res.num});
+ coll.find({_num: res._num}, kPrefixProjection).hint({$natural: 1}).toArray()[0];
+ const originalDoc = coll.findOne({_num: res._num});
assert.eq(res, trueResult, "Mismatched projection of " + tojson(originalDoc));
}
})();
@@ -357,7 +356,7 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
(function testGroup() {
// Sanity check that we are comparing the plans we expect to be.
let pipeline = [
- {$group: {_id: "$a.b.c", docs: {$push: "$num"}}},
+ {$group: {_id: "$a.b.c", docs: {$push: "$_num"}}},
{$set: {docs: {$sortArray: {input: "$docs", sortBy: 1}}}}
];
let naturalExplain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}});
@@ -485,4 +484,3 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
runTest({locale: "en", strength: 3}, 1); // case sensitive
runTest({locale: "en", strength: 2}, 3); // case insensitive
})();
-})();
diff --git a/jstests/core/columnstore/columnstore_index_per_path_filters.js b/jstests/core/columnstore/columnstore_index_per_path_filters.js
index 32b457c6b6a61..c96fff5aa699c 100644
--- a/jstests/core/columnstore/columnstore_index_per_path_filters.js
+++ b/jstests/core/columnstore/columnstore_index_per_path_filters.js
@@ -16,16 +16,12 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For "resultsEq."
-load("jstests/libs/analyze_plan.js"); // For "planHasStage."
-load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages.
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll_filters = db.columnstore_index_per_path_filters;
@@ -1060,5 +1056,4 @@ function testInExpr(test) {
assert.lt(x.numSeeks + y.numSeeks,
2 * expectedToMatchCount,
"Number of seeks in filtered columns should be small");
-})();
-})();
+})();
diff --git a/jstests/core/columnstore/columnstore_large_array_index_correctness.js b/jstests/core/columnstore/columnstore_large_array_index_correctness.js
index 3c15b62e24712..8f01cdcb4065d 100644
--- a/jstests/core/columnstore/columnstore_large_array_index_correctness.js
+++ b/jstests/core/columnstore/columnstore_large_array_index_correctness.js
@@ -12,30 +12,27 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For "planHasStage."
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {planHasStage} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.columnstore_large_array_index_correctness;
coll.drop();
const uint8 = {
- num: 0,
- a: Array.from({length: 50}, (_, i) => ({b: [2 * i, 2 * i + 1]}))
+ _num: 0,
+ o: Array.from({length: 50}, (_, i) => ({b: [2 * i, 2 * i + 1]})),
};
const uint16 = {
- num: 1,
- a: Array.from({length: 150}, (_, i) => ({b: [2 * i, 2 * i + 1]}))
+ _num: 1,
+ o: Array.from({length: 150}, (_, i) => ({b: [2 * i, 2 * i + 1]})),
};
const uint32 = {
- num: 2,
- a: Array.from({length: 15000}, (_, i) => ({b: [2 * i, 2 * i + 1]}))
+ _num: 2,
+ o: Array.from({length: 15000}, (_, i) => ({b: [2 * i, 2 * i + 1]})),
};
const docs = [uint8, uint16, uint32];
@@ -47,27 +44,26 @@ for (let doc of docs) {
assert.commandWorked(coll.createIndex({"$**": "columnstore"}));
const kProjection = {
_id: 0,
- "a.b": 1,
- num: 1,
+ _num: 1,
+ "o.b": 1,
};
// Ensure this test is exercising the column scan.
-let explain = coll.find({}, kProjection).sort({num: 1}).explain();
+let explain = coll.find({}, kProjection).sort({_num: 1}).explain();
assert(planHasStage(db, explain, "COLUMN_SCAN"), explain);
// Run a query getting all of the results using the column index.
-let results = coll.find({}, kProjection).sort({num: 1}).toArray();
+let results = coll.find({}, kProjection).sort({_num: 1}).toArray();
assert.gt(results.length, 0);
// Run a query getting all results without column index
-let trueResults = coll.find({}, kProjection).hint({$natural: 1}).sort({num: 1}).toArray();
+let trueResults = coll.find({}, kProjection).hint({$natural: 1}).sort({_num: 1}).toArray();
assert.eq(results.length, trueResults.length);
for (let i = 0; i < results.length; i++) {
- const originalDoc = coll.findOne({num: results[i].num});
+ const originalDoc = coll.findOne({_num: results[i]._num});
assert.eq(results[i], trueResults[i], () =>
- `column store index output number: ${results[i].num}, collection scan output number: ${trueResults[i].num},
- original document number was: ${originalDoc.num}`);
+ `column store index output number: ${results[i]._num}, collection scan output number: ${trueResults[i]._num},
+ original document number was: ${originalDoc._num}`);
}
-})();
diff --git a/jstests/core/columnstore/columnstore_validindex.js b/jstests/core/columnstore/columnstore_validindex.js
index 421ea4a6edc06..8015a891d342e 100644
--- a/jstests/core/columnstore/columnstore_validindex.js
+++ b/jstests/core/columnstore/columnstore_validindex.js
@@ -13,15 +13,12 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/index_catalog_helpers.js"); // For "IndexCatalogHelpers."
load("jstests/libs/collection_drop_recreate.js"); // For "assertDropCollection."
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const kCollectionName = "columnstore_validindex";
@@ -35,12 +32,6 @@ const kKeyPattern = {
// Can create a valid columnstore index.
IndexCatalogHelpers.createIndexAndVerifyWithDrop(coll, kKeyPattern, {name: kIndexName});
-// Can create a columnstore index with foreground & background construction.
-IndexCatalogHelpers.createIndexAndVerifyWithDrop(
- coll, kKeyPattern, {background: false, name: kIndexName});
-IndexCatalogHelpers.createIndexAndVerifyWithDrop(
- coll, kKeyPattern, {background: true, name: kIndexName});
-
// Test that you cannot create a columnstore index with a collation - either with the argument or
// because the collection has a default collation specified.
assert.commandFailedWithCode(
@@ -197,5 +188,4 @@ assert.commandFailedWithCode(
ErrorCodes.InvalidIndexSpecificationOption);
assert.commandFailedWithCode(
db.runCommand({create: clusteredCollName, clusteredIndex: {key: kKeyPattern, unique: false}}),
- 5979700);
-})();
+ 5979700);
diff --git a/jstests/core/command_let_variables.js b/jstests/core/command_let_variables.js
index 1e4286dbc1908..a408b3a247c52 100644
--- a/jstests/core/command_let_variables.js
+++ b/jstests/core/command_let_variables.js
@@ -3,16 +3,16 @@
// @tags: [
// ]
//
-(function() {
-"use strict";
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'.
const testDB = db.getSiblingDB("command_let_variables");
const coll = testDB.command_let_variables;
-const targetColl = testDB.command_let_variables_target;
+coll.drop();
-assert.commandWorked(testDB.dropDatabase());
+const isMongos = FixtureHelpers.isMongos(testDB);
+const isCollSharded = FixtureHelpers.isSharded(coll);
const testDocs = [
{
@@ -82,7 +82,20 @@ expectedResults = [
assert.eq(coll.aggregate(pipeline, {let : {target_trend: "weak decline"}}).toArray(),
expectedResults);
-if (!FixtureHelpers.isMongos(testDB)) {
+// Test that running explain on the agg command works as expected.
+let explain = assert.commandWorked(testDB.runCommand({
+ explain:
+ {aggregate: coll.getName(), pipeline, let : {target_trend: "weak decline"}, cursor: {}},
+ verbosity: "executionStats"
+}));
+if (!isMongos) {
+ assert(explain.hasOwnProperty("stages"), explain);
+ assert.neq(explain.stages.length, 0, explain);
+ let lastStage = explain.stages[explain.stages.length - 1];
+ assert.eq(lastStage.nReturned, 2, explain);
+}
+
+if (!isMongos) {
// Test that if runtimeConstants and let are both specified, both will coexist.
// Runtime constants are not allowed on mongos passthroughs.
let constants = {
@@ -262,9 +275,23 @@ expectedResults = {
assert.eq(result.length, 1);
assert.eq(expectedResults, result[0]);
-// Delete tests with let params will delete a record, assert that a point-wise find yields an empty
-// result, and then restore the collection state for further tests down the line. We can't exercise
-// a multi-delete here (limit: 0) because of failures in sharded txn passthrough tests.
+// Test that let parameters work as expected when the find is run as an explain.
+explain = assert.commandWorked(testDB.runCommand({
+ explain: {
+ find: coll.getName(),
+ let : {target_species: "Song Thrush (Turdus philomelos)"},
+ filter: {$expr: {$eq: ["$Species", "$$target_species"]}},
+ projection: {_id: 0}
+ },
+ verbosity: "executionStats"
+}));
+if (!isMongos) {
+ assert.eq(explain.executionStats.nReturned, 1, explain);
+}
+
+// The delete tests with 'let' params delete a record, assert that a point-wise find yields an
+// empty result, and then restore the collection state for later tests. We can't exercise a
+// multi-delete here (limit: 0) because of failures in sharded txn passthrough tests.
assert.commandWorked(testDB.runCommand({
delete: coll.getName(),
let : {target_species: "Song Thrush (Turdus philomelos)"},
@@ -277,8 +304,24 @@ result = assert
.cursor.firstBatch;
assert.eq(result.length, 0);
-// Test that the .remove() shell helper supports let parameters.
assert.commandWorked(coll.insert({_id: 4, Species: "bird_to_remove"}));
+
+// Test that explain of a delete command works as expected with 'let' parameters.
+explain = assert.commandWorked(testDB.runCommand({
+ explain: {
+ delete: coll.getName(),
+ let : {target_species: "bird_to_remove"},
+ deletes:
+ [{q: {$and: [{_id: 4}, {$expr: {$eq: ["$Species", "$$target_species"]}}]}, limit: 1}]
+ },
+ verbosity: "executionStats"
+}));
+if (!isMongos) {
+ let deleteStage = getPlanStage(explain.executionStats.executionStages, "DELETE");
+ assert.eq(deleteStage.nWouldDelete, 1, explain);
+}
+
+// Test that the .remove() shell helper supports let parameters.
result = assert.commandWorked(
coll.remove({$and: [{_id: 4}, {$expr: {$eq: ["$Species", "$$target_species"]}}]},
{justOne: true, let : {target_species: "bird_to_remove"}}));
@@ -333,30 +376,49 @@ assert.commandWorked(testDB.runCommand({
cursor: {}
}));
-// Test that findAndModify works correctly with let parameter arguments.
assert.commandWorked(coll.insert({_id: 5, Species: "spy_bird"}));
-result = testDB.runCommand({
+
+// Test that explain of findAndModify works correctly with let parameters.
+explain = assert.commandWorked(testDB.runCommand({
+ explain: {
+ findAndModify: coll.getName(),
+ let : {target_species: "spy_bird"},
+ // Querying on _id field for sharded collection passthroughs.
+ query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "$$target_species"]}}]},
+ update: {Species: "questionable_bird"},
+ new: true
+ },
+ verbosity: "executionStats"
+}));
+if (!isMongos) {
+ let updateStage = getPlanStage(explain.executionStats.executionStages, "UPDATE");
+ assert.eq(updateStage.nMatched, 1, explain);
+ assert.eq(updateStage.nWouldModify, 1, explain);
+}
+
+// Test that findAndModify works correctly with let parameter arguments.
+result = assert.commandWorked(testDB.runCommand({
findAndModify: coll.getName(),
let : {target_species: "spy_bird"},
// Querying on _id field for sharded collection passthroughs.
query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "$$target_species"]}}]},
update: {Species: "questionable_bird"},
new: true
-});
+}));
expectedResults = {
_id: 5,
Species: "questionable_bird"
};
assert.eq(expectedResults, result.value, result);
-result = testDB.runCommand({
+result = assert.commandWorked(testDB.runCommand({
findAndModify: coll.getName(),
let : {species_name: "not_a_bird", realSpecies: "dino"},
// Querying on _id field for sharded collection passthroughs.
query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "questionable_bird"]}}]},
update: [{$project: {Species: "$$species_name"}}, {$addFields: {suspect: "$$realSpecies"}}],
new: true
-});
+}));
expectedResults = {
_id: 5,
Species: "not_a_bird",
@@ -364,12 +426,31 @@ expectedResults = {
};
assert.eq(expectedResults, result.value, result);
+// Test that explain of update works correctly with let parameters.
+explain = assert.commandWorked(testDB.runCommand({
+ explain: {
+ update: coll.getName(),
+ updates: [{
+ q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}},
+ u: [{$set: {Species: "$$new_name"}}],
+ }],
+ let : {target_species: "Chaffinch (Fringilla coelebs)", new_name: "Chaffinch"}
+ },
+ verbosity: "executionStats"
+}));
+if (!isMongos) {
+ let updateStage = getPlanStage(explain.executionStats.executionStages, "UPDATE");
+ assert.eq(updateStage.nMatched, 1, explain);
+ assert.eq(updateStage.nWouldModify, 1, explain);
+}
+
// Test that update respects different parameters in both the query and update part.
result = assert.commandWorked(testDB.runCommand({
update: coll.getName(),
- updates: [
- {q: {$expr: {$eq: ["$Species", "$$target_species"]}}, u: [{$set: {Species: "$$new_name"}}]}
- ],
+ updates: [{
+ q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}},
+ u: [{$set: {Species: "$$new_name"}}],
+ }],
let : {target_species: "Chaffinch (Fringilla coelebs)", new_name: "Chaffinch"}
}));
assert.eq(result.n, 1);
@@ -387,8 +468,8 @@ assert.eq(result.cursor.firstBatch.length, 1);
result = assert.commandWorked(testDB.runCommand({
update: coll.getName(),
updates: [{
- q: {$expr: {$eq: ["$Species", "$$target_species"]}},
- u: [{$set: {Timestamp: "$$NOW"}}, {$set: {Species: "$$new_name"}}]
+ q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}},
+ u: [{$set: {Timestamp: "$$NOW"}}, {$set: {Species: "$$new_name"}}],
}],
let : {target_species: "Chaffinch", new_name: "Pied Piper"}
}));
@@ -403,6 +484,12 @@ result = assert.commandWorked(
testDB.runCommand({find: coll.getName(), filter: {$expr: {$eq: ["$Species", "Pied Piper"]}}}));
assert.eq(result.cursor.firstBatch.length, 1, result);
+// This forces a multi-statement transaction to commit if this test is running in one of the
+// multi-statement transaction passthrough suites. We need to do this to ensure the updates
+// above commit before running an update that will fail, as the failed update aborts the entire
+// transaction and rolls back the updates above.
+assert.commandWorked(testDB.runCommand({ping: 1}));
+
// Test that undefined let params in the update's query part fail gracefully.
assert.commandFailedWithCode(testDB.runCommand({
update: coll.getName(),
@@ -418,8 +505,8 @@ assert.commandFailedWithCode(testDB.runCommand({
assert.commandFailedWithCode(testDB.runCommand({
update: coll.getName(),
updates: [{
- q: {$expr: {$eq: ["$Species", "Chaffinch (Fringilla coelebs)"]}},
- u: [{$set: {Species: "$$new_name"}}]
+ q: {_id: 3, $expr: {$eq: ["$Species", "Chaffinch (Fringilla coelebs)"]}},
+ u: [{$set: {Species: "$$new_name"}}],
}],
let : {cat: "not_a_bird"}
}),
@@ -427,7 +514,7 @@ assert.commandFailedWithCode(testDB.runCommand({
// Test that the .update() shell helper supports let parameters.
result = assert.commandWorked(
- coll.update({$expr: {$eq: ["$Species", "$$target_species"]}},
+ coll.update({_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}},
[{$set: {Species: "$$new_name"}}],
{let : {target_species: "Pied Piper", new_name: "Chaffinch"}}));
assert.eq(result.nMatched, 1);
@@ -519,7 +606,10 @@ assert.between(0, result, 1);
}
// Test that the expressions are evaluated once up front.
-{
+//
+// TODO SERVER-75927: This does not work as expected when the collection is sharded. Once the bug
+// is fixed, we should re-enable this test case for sharded collections.
+if (!isCollSharded) {
const values = assert
.commandWorked(testDB.runCommand({
find: coll.getName(),
@@ -539,5 +629,4 @@ assert.between(0, result, 1);
result = coll.aggregate([{$match: {$expr: {$eq: ["$_id", 2]}}}, {$project: {a: "$$b"}}],
{let : {b: {$literal: "$notAFieldPath"}}})
.toArray();
-assert.eq(result, [{_id: 2, a: "$notAFieldPath"}]);
-}());
+assert.eq(result, [{_id: 2, a: "$notAFieldPath"}]);
diff --git a/jstests/core/command_let_variables_merge_only.js b/jstests/core/command_let_variables_merge_only.js
index 884533314c996..4bb6bfe7fac5f 100644
--- a/jstests/core/command_let_variables_merge_only.js
+++ b/jstests/core/command_let_variables_merge_only.js
@@ -6,6 +6,7 @@
* does_not_support_stepdowns,
* does_not_support_causal_consistency,
* uses_$out,
+ * references_foreign_collection,
* ]
*/
(function() {
diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js
index d352a0e3576c9..b48801beef590 100644
--- a/jstests/core/commands_namespace_parsing.js
+++ b/jstests/core/commands_namespace_parsing.js
@@ -19,6 +19,7 @@
// does_not_support_causal_consistency,
// uses_compact,
// ]
+load("jstests/libs/fixture_helpers.js");
// This file tests that commands namespace parsing rejects embedded null bytes.
// Note that for each command, a properly formatted command object must be passed to the helper
@@ -63,9 +64,7 @@ function assertFailsWithInvalidNamespacesForField(
}
}
-const hello = db.runCommand("hello");
-assert.commandWorked(hello);
-const isMongos = (hello.msg === "isdbgrid");
+const runningOnMongos = FixtureHelpers.isMongos(db);
db.commands_namespace_parsing.drop();
assert.commandWorked(db.commands_namespace_parsing.insert({a: 1}));
@@ -182,7 +181,7 @@ assertFailsWithInvalidNamespacesForField("collection",
isNotFullyQualified,
isNotAdminCommand);
-if (!isMongos) {
+if (!runningOnMongos) {
// Test godinsert fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
"godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand);
@@ -206,13 +205,13 @@ assertFailsWithInvalidNamespacesForField(
assertFailsWithInvalidNamespacesForField(
"planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand);
-if (!isMongos) {
+if (!runningOnMongos) {
// Test cleanupOrphaned fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
"cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand);
}
-if (isMongos) {
+if (runningOnMongos) {
// Test enableSharding fails with an invalid database name.
assertFailsWithInvalidNamespacesForField(
"enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand);
@@ -266,7 +265,7 @@ assertFailsWithInvalidNamespacesForField(
assertFailsWithInvalidNamespacesForField(
"create", {create: ""}, isNotFullyQualified, isNotAdminCommand);
-if (!isMongos) {
+if (!runningOnMongos) {
// Test cloneCollectionAsCapped fails with an invalid source collection name.
assertFailsWithInvalidNamespacesForField(
"cloneCollectionAsCapped",
@@ -308,7 +307,7 @@ assertFailsWithInvalidNamespacesForField(
assertFailsWithInvalidNamespacesForField(
"dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand);
-if (!isMongos) {
+if (!runningOnMongos) {
// Test compact fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
"compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand);
@@ -322,7 +321,7 @@ assertFailsWithInvalidNamespacesForField(
isNotAdminCommand);
// Test reIndex fails with an invalid collection name.
-if (!isMongos) {
+if (!runningOnMongos) {
assertFailsWithInvalidNamespacesForField(
"reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand);
}
diff --git a/jstests/core/commands_with_uuid.js b/jstests/core/commands_with_uuid.js
index b0260e0bc3180..3744e038992f5 100644
--- a/jstests/core/commands_with_uuid.js
+++ b/jstests/core/commands_with_uuid.js
@@ -10,6 +10,7 @@
* assumes_no_implicit_index_creation
* ]
*/
+load("jstests/libs/fixture_helpers.js");
(function() {
'use strict';
@@ -29,10 +30,7 @@ if (uuid == null) {
}
// No support for UUIDs on mongos.
-const hello = db.runCommand("hello");
-assert.commandWorked(hello);
-const isMongos = (hello.msg === "isdbgrid");
-if (isMongos) {
+if (FixtureHelpers.isMongos(db)) {
return;
}
diff --git a/jstests/core/compound_wildcard_index_validation.js b/jstests/core/compound_wildcard_index_validation.js
index e10b0d8883690..c64074342aad7 100644
--- a/jstests/core/compound_wildcard_index_validation.js
+++ b/jstests/core/compound_wildcard_index_validation.js
@@ -7,6 +7,7 @@
* does_not_support_stepdowns,
* featureFlagCompoundWildcardIndexes,
* requires_fcv_70,
+ * uses_full_validation,
* ]
*/
diff --git a/jstests/core/connection_string_validation.js b/jstests/core/connection_string_validation.js
index 6236e445a775d..43a7373b97301 100644
--- a/jstests/core/connection_string_validation.js
+++ b/jstests/core/connection_string_validation.js
@@ -5,7 +5,7 @@
// ]
// Related to SERVER-8030.
-port = "27017";
+let port = "27017";
if (db.getMongo().host.indexOf(":") >= 0) {
var idx = db.getMongo().host.indexOf(":");
diff --git a/jstests/core/cover_null_queries.js b/jstests/core/cover_null_queries.js
index 30b8025ecc936..7ac2d5d6d746f 100644
--- a/jstests/core/cover_null_queries.js
+++ b/jstests/core/cover_null_queries.js
@@ -10,11 +10,8 @@
* cqf_incompatible,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages() and getPlanStages().
+import {getPlanStages, getAggPlanStages} from "jstests/libs/analyze_plan.js";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
const coll = db.cover_null_queries;
@@ -1020,5 +1017,4 @@ validateGroupCountAggCmdOutputAndPlan({
filter: {"a.b": {$in: [null, []]}},
expectedCount: 10,
expectedStages: {"IXSCAN": 1, "FETCH": 1},
-});
-})();
+});
diff --git a/jstests/core/covered_query_with_sort.js b/jstests/core/covered_query_with_sort.js
index 2f299c55d13a5..1aa32308331cc 100644
--- a/jstests/core/covered_query_with_sort.js
+++ b/jstests/core/covered_query_with_sort.js
@@ -7,10 +7,7 @@
// assumes_unsharded_collection,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly', 'getPlanStage' and 'getWinningPlan'.
+import {getPlanStage, getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js";
const coll = db.covered_query_with_sort;
coll.drop();
@@ -39,5 +36,4 @@ const ixScanStage = getPlanStage(projectionCoveredStage, "IXSCAN");
assert.neq(ixScanStage, null, plan);
const results = buildQuery().toArray();
-assert.eq(results, [{y: 0, x: 1}, {y: 0, x: 0}], results);
-}());
\ No newline at end of file
+assert.eq(results, [{y: 0, x: 1}, {y: 0, x: 0}], results);
diff --git a/jstests/core/dbcase.js b/jstests/core/dbcase.js
index 23b5bccd30503..78d6e05f8abd4 100644
--- a/jstests/core/dbcase.js
+++ b/jstests/core/dbcase.js
@@ -5,15 +5,15 @@
// multiple_tenants_incompatible,
// ]
-a = db.getSiblingDB("dbcasetest_dbnamea");
-b = db.getSiblingDB("dbcasetest_dbnameA");
+let a = db.getSiblingDB("dbcasetest_dbnamea");
+let b = db.getSiblingDB("dbcasetest_dbnameA");
a.dropDatabase();
b.dropDatabase();
assert.commandWorked(a.foo.save({x: 1}));
-res = b.foo.save({x: 1});
+let res = b.foo.save({x: 1});
assert.writeError(res);
assert.neq(-1, db.getMongo().getDBNames().indexOf(a.getName()));
@@ -23,8 +23,8 @@ printjson(db.getMongo().getDBs().databases);
a.dropDatabase();
b.dropDatabase();
-ai = db.getMongo().getDBNames().indexOf(a.getName());
-bi = db.getMongo().getDBNames().indexOf(b.getName());
+let ai = db.getMongo().getDBNames().indexOf(a.getName());
+let bi = db.getMongo().getDBNames().indexOf(b.getName());
// One of these dbs may exist if there is a secondary active, but they must
// not both exist.
assert(ai == -1 || bi == -1);
diff --git a/jstests/core/dbhash.js b/jstests/core/dbhash.js
index c601c721f5be9..33ee736e4b114 100644
--- a/jstests/core/dbhash.js
+++ b/jstests/core/dbhash.js
@@ -3,15 +3,15 @@
// not_allowed_with_security_token,
// ]
-a = db.dbhasha;
-b = db.dbhashb;
+let a = db.dbhasha;
+let b = db.dbhashb;
a.drop();
b.drop();
// debug SERVER-761
db.getCollectionNames().forEach(function(x) {
- v = db[x].validate();
+ let v = db[x].validate();
if (!v.valid) {
print(x);
printjson(v);
@@ -45,8 +45,8 @@ assert.neq(gh(a), gh(b), "A2");
b.insert({_id: 5});
assert.eq(gh(a), gh(b), "A3");
-dba = db.getSiblingDB("dbhasha");
-dbb = db.getSiblingDB("dbhashb");
+let dba = db.getSiblingDB("dbhasha");
+let dbb = db.getSiblingDB("dbhashb");
dba.dropDatabase();
dbb.dropDatabase();
diff --git a/jstests/core/dbhash2.js b/jstests/core/dbhash2.js
index 93ec2abf99c3b..cf86a1a6d031d 100644
--- a/jstests/core/dbhash2.js
+++ b/jstests/core/dbhash2.js
@@ -4,20 +4,20 @@
// assumes_superuser_permissions,
// ]
-mydb = db.getSiblingDB("config");
+let mydb = db.getSiblingDB("config");
-t = mydb.foo;
+let t = mydb.foo;
t.drop();
assert.commandWorked(t.insert({x: 1}));
-res1 = mydb.runCommand("dbhash");
-res2 = mydb.runCommand("dbhash");
+let res1 = mydb.runCommand("dbhash");
+let res2 = mydb.runCommand("dbhash");
assert.commandWorked(res1);
assert.commandWorked(res2);
assert.eq(res1.collections.foo, res2.collections.foo);
assert.commandWorked(t.insert({x: 2}));
-res3 = mydb.runCommand("dbhash");
+let res3 = mydb.runCommand("dbhash");
assert.commandWorked(res3);
assert.neq(res1.collections.foo, res3.collections.foo);
diff --git a/jstests/core/ddl/background_unique_indexes.js b/jstests/core/ddl/background_unique_indexes.js
index fb1d0d9aee7ef..819dd1be89183 100644
--- a/jstests/core/ddl/background_unique_indexes.js
+++ b/jstests/core/ddl/background_unique_indexes.js
@@ -52,12 +52,10 @@ for (let iteration = 0; iteration < nIterations; iteration++) {
assert.commandWorked(testDB.runCommand({update: collName, updates: updates}));
// Create a background unique index on the collection.
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}]
- }));
+ assert.commandWorked(testDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]}));
- // Generate updates that increment x on each document backwards by _id to avoid conficts
+ // Generate updates that increment x on each document backwards by _id to avoid conflicts
// when applied in-order.
updates = [];
for (let i = 0; i < nOps; i++) {
diff --git a/jstests/core/ddl/bad_index_plugin.js b/jstests/core/ddl/bad_index_plugin.js
index c9cd549cc03e0..cb52ef0c1abcf 100644
--- a/jstests/core/ddl/bad_index_plugin.js
+++ b/jstests/core/ddl/bad_index_plugin.js
@@ -1,5 +1,5 @@
// SERVER-5826 ensure you can't build an index with a non-existent plugin
-t = db.bad_index_plugin;
+let t = db.bad_index_plugin;
assert.commandWorked(t.createIndex({good: 1}));
assert.eq(t.getIndexes().length, 2); // good + _id
diff --git a/jstests/core/ddl/capped_convertToCapped1.js b/jstests/core/ddl/capped_convertToCapped1.js
index 137705c8661c2..4f51edc6c247e 100644
--- a/jstests/core/ddl/capped_convertToCapped1.js
+++ b/jstests/core/ddl/capped_convertToCapped1.js
@@ -17,21 +17,21 @@
* ]
*/
-source = db.capped_convertToCapped1;
-dest = db.capped_convertToCapped1_clone;
+let source = db.capped_convertToCapped1;
+let dest = db.capped_convertToCapped1_clone;
source.drop();
dest.drop();
-N = 1000;
+let N = 1000;
-for (i = 0; i < N; ++i) {
+for (let i = 0; i < N; ++i) {
source.save({i: i});
}
assert.eq(N, source.count());
// should all fit
-res = db.runCommand(
+let res = db.runCommand(
{cloneCollectionAsCapped: source.getName(), toCollection: dest.getName(), size: 100000});
assert.commandWorked(res);
assert.eq(source.count(), dest.count());
diff --git a/jstests/core/ddl/collection_uuid_index_commands.js b/jstests/core/ddl/collection_uuid_index_commands.js
index 85c9f0a91247f..fae47998e33a2 100644
--- a/jstests/core/ddl/collection_uuid_index_commands.js
+++ b/jstests/core/ddl/collection_uuid_index_commands.js
@@ -31,7 +31,7 @@ const validateErrorResponse = function(
};
const testCommand = function(cmd, cmdObj) {
- const testDB = db.getSiblingDB(jsTestName());
+ const testDB = db.getSiblingDB("coll_uuid_index_cmds");
assert.commandWorked(testDB.dropDatabase());
const coll = testDB['coll'];
assert.commandWorked(coll.insert({x: 1, y: 2}));
@@ -104,4 +104,4 @@ const testCommand = function(cmd, cmdObj) {
testCommand("createIndexes", {createIndexes: "", indexes: [{name: "x_1", key: {x: 1}}]});
testCommand("dropIndexes", {dropIndexes: "", index: {y: 1}});
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/ddl/collection_uuid_rename_collection.js b/jstests/core/ddl/collection_uuid_rename_collection.js
index bc294fd7aabc6..81d695cc03929 100644
--- a/jstests/core/ddl/collection_uuid_rename_collection.js
+++ b/jstests/core/ddl/collection_uuid_rename_collection.js
@@ -11,7 +11,7 @@
(function() {
'use strict';
-const testDB = db.getSiblingDB(jsTestName());
+const testDB = db.getSiblingDB("coll_uuid_rename_coll");
assert.commandWorked(testDB.dropDatabase());
const coll = testDB.coll;
diff --git a/jstests/core/ddl/collmod_convert_index_uniqueness.js b/jstests/core/ddl/collmod_convert_index_uniqueness.js
index ab2d82377889d..738b20a7d9e27 100644
--- a/jstests/core/ddl/collmod_convert_index_uniqueness.js
+++ b/jstests/core/ddl/collmod_convert_index_uniqueness.js
@@ -14,14 +14,11 @@
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) {
jsTestLog('Skipping test because the collMod unique index feature flag is disabled.');
- return;
+ quit();
}
const collName = 'collmod_convert_to_unique';
@@ -183,5 +180,4 @@ if (db.getMongo().isMongos()) {
}
// Tests the index now accepts duplicate keys.
-assert.commandWorked(coll.insert({_id: 100, a: 100}));
-})();
+assert.commandWorked(coll.insert({_id: 100, a: 100}));
diff --git a/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js b/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js
index 658382999941d..ce7548a7fa75f 100644
--- a/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js
+++ b/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js
@@ -20,14 +20,11 @@
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) {
jsTestLog('Skipping test because the collMod unique index feature flag is disabled.');
- return;
+ quit();
}
const collName = 'collmod_convert_to_unique_apply_ops';
@@ -97,5 +94,4 @@ assert.sameMembers([true], result.results, tojson(result));
assert.eq(countUnique({a: 1}), 1, 'index should be unique now: ' + tojson(coll.getIndexes()));
// Test uniqueness constraint.
-assert.commandFailedWithCode(coll.insert({_id: 100, a: 100}), ErrorCodes.DuplicateKey);
-})();
+assert.commandFailedWithCode(coll.insert({_id: 100, a: 100}), ErrorCodes.DuplicateKey);
diff --git a/jstests/core/ddl/collmod_convert_to_unique_violations.js b/jstests/core/ddl/collmod_convert_to_unique_violations.js
index 53b8abeec6e65..02d055dfa3bf2 100644
--- a/jstests/core/ddl/collmod_convert_to_unique_violations.js
+++ b/jstests/core/ddl/collmod_convert_to_unique_violations.js
@@ -15,15 +15,13 @@
* ]
*/
-(function() {
-'use strict';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-load("jstests/libs/feature_flag_util.js");
load("jstests/libs/fixture_helpers.js"); // For 'isMongos'
if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) {
jsTestLog('Skipping test because the collMod unique index feature flag is disabled.');
- return;
+ quit();
}
function sortViolationsArray(arr) {
@@ -97,5 +95,4 @@ assert.commandWorked(coll.insert({_id: 9, a: 101, b: 4}));
assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9]}, {ids: [6, 7, 8]}]);
assert.commandWorked(coll.insert({_id: "10", a: 101, b: 4}));
-assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9, "10"]}, {ids: [6, 7, 8]}]);
-})();
+assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9, "10"]}, {ids: [6, 7, 8]}]);
diff --git a/jstests/core/ddl/create_collection.js b/jstests/core/ddl/create_collection.js
index a38d9efb97004..e0a09b70b8aab 100644
--- a/jstests/core/ddl/create_collection.js
+++ b/jstests/core/ddl/create_collection.js
@@ -12,13 +12,8 @@
load("jstests/libs/index_catalog_helpers.js");
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-// TODO SERVER-73934: Change assertions on 'drop' command results throughout this file to
-// always expect the command worked. Currently, they can return NamespaceNotFound on
-// server versions < 7.0.
-
// "create" command rejects invalid options.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415);
// Cannot create a collection with null characters.
@@ -29,8 +24,7 @@ assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidName
// The collection name length limit was upped in 4.4, try creating a collection with a longer
// name than previously allowed.
const longCollName = 'a'.repeat(200);
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: longCollName}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: longCollName}));
assert.commandWorked(db.createCollection(longCollName));
//
@@ -38,8 +32,7 @@ assert.commandWorked(db.createCollection(longCollName));
//
// "idIndex" field not allowed with "viewOn".
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(db.createCollection("create_collection"));
assert.commandFailedWithCode(db.runCommand({
create: "create_view",
@@ -49,42 +42,36 @@ assert.commandFailedWithCode(db.runCommand({
ErrorCodes.InvalidOptions);
// "idIndex" field not allowed with "autoIndexId".
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(
db.createCollection("create_collection",
{autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}),
ErrorCodes.InvalidOptions);
// "idIndex" field must be an object.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}),
ErrorCodes.TypeMismatch);
// "idIndex" field cannot be empty.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}),
ErrorCodes.FailedToParse);
// "idIndex" field must be a specification for an _id index.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(
db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}),
ErrorCodes.BadValue);
// "idIndex" field must have "key" equal to {_id: 1}.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(
db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}),
ErrorCodes.BadValue);
// The name of an _id index gets corrected to "_id_".
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(
db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}}));
var indexSpec = IndexCatalogHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1});
@@ -92,16 +79,14 @@ assert.neq(indexSpec, null);
assert.eq(indexSpec.name, "_id_", tojson(indexSpec));
// "idIndex" field must only contain fields that are allowed for an _id index.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(
db.createCollection("create_collection",
{idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}),
ErrorCodes.InvalidIndexSpecificationOption);
// "create" creates v=2 _id index when "v" is not specified in "idIndex".
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(
db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}}));
indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_");
@@ -109,8 +94,7 @@ assert.neq(indexSpec, null);
assert.eq(indexSpec.v, 2, tojson(indexSpec));
// "create" creates v=1 _id index when "idIndex" has "v" equal to 1.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(
db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_");
@@ -118,8 +102,7 @@ assert.neq(indexSpec, null);
assert.eq(indexSpec.v, 1, tojson(indexSpec));
// "create" creates v=2 _id index when "idIndex" has "v" equal to 2.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(
db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_");
@@ -127,31 +110,27 @@ assert.neq(indexSpec, null);
assert.eq(indexSpec.v, 2, tojson(indexSpec));
// "collation" field of "idIndex" must match collection default collation.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(
db.createCollection("create_collection",
{idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}),
ErrorCodes.BadValue);
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(db.createCollection("create_collection", {
collation: {locale: "fr_CA"},
idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
}),
ErrorCodes.BadValue);
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandFailedWithCode(db.createCollection("create_collection", {
collation: {locale: "fr_CA"},
idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}}
}),
ErrorCodes.BadValue);
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(db.createCollection("create_collection", {
collation: {locale: "en_US", strength: 3},
idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}
@@ -162,8 +141,7 @@ assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec));
// If "collation" field is not present in "idIndex", _id index inherits collection default
// collation.
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
assert.commandWorked(db.createCollection(
"create_collection", {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}}));
indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_");
@@ -179,14 +157,11 @@ assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {cappe
ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}),
ErrorCodes.InvalidOptions);
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "no_capped"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "no_capped"}));
assert.commandWorked(db.createCollection('no_capped'), {capped: false});
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "capped_no_max"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "capped_no_max"}));
assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256}));
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "capped_with_max_and_size"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "capped_with_max_and_size"}));
assert.commandWorked(
db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256}));
@@ -203,14 +178,7 @@ if (ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo())) {
return;
}
-// The remainder of this test will not work on server versions < 7.0 as the 'create' command
-// is not idempotent there. TODO SERVER-74062: remove this.
-if (db.version().split('.')[0] < 7) {
- return;
-}
-
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}),
- ErrorCodes.NamespaceNotFound);
+assert.commandWorked(db.runCommand({drop: "create_collection"}));
// Creating a collection that already exists with no options specified reports success.
assert.commandWorked(db.createCollection("create_collection"));
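The drop assertions above collapse because dropping a non-existent collection now reports success on 7.0+ binaries, so the version-tolerant wrapper that also accepted NamespaceNotFound is no longer needed. The two forms, with an illustrative collection name:

// Pre-7.0 tolerant form: accept success or NamespaceNotFound.
assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "some_coll"}),
                                     ErrorCodes.NamespaceNotFound);

// 7.0+ form: drop is idempotent, so plain success is expected.
assert.commandWorked(db.runCommand({drop: "some_coll"}));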
diff --git a/jstests/core/ddl/create_index_helper_validation.js b/jstests/core/ddl/create_index_helper_validation.js
index 1b11a50c6ca83..854c82be064cc 100644
--- a/jstests/core/ddl/create_index_helper_validation.js
+++ b/jstests/core/ddl/create_index_helper_validation.js
@@ -10,20 +10,17 @@ assert.throws(() => coll.createIndexes(
/* keys */[{a: 1}],
/* options */ {},
/* commitQuorum */ "majority",
- {background: true},
{unique: true}));
assert.throws(() => coll.createIndex(
/* keys */ {a: 1},
/* options */ {},
/* commitQuorum */ "majority",
- {background: true},
{unique: true}));
assert.throws(() => coll.createIndex(
/* keys */ {a: 1},
/* options */ {},
/* commitQuorum */ "majority",
- {background: true},
{unique: true}));
-}());
\ No newline at end of file
+}());
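The object arguments deleted above were stray positional options that the shell helpers never consumed; the helpers take keys, options, and an optional commitQuorum, and any further argument makes them throw, which is what the remaining assertions verify. A small sketch of the accepted call shapes (collection name is illustrative; commitQuorum only applies on replica sets):

const coll = db.create_index_helper_example;  // illustrative name

// Accepted shapes: keys, options, and an optional commitQuorum.
coll.createIndex({a: 1}, {unique: true});
coll.createIndexes([{b: 1}, {c: 1}], {});

// A fourth positional argument is rejected by the helper.
assert.throws(() => coll.createIndex({d: 1}, {}, "majority", {unique: true}));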
diff --git a/jstests/core/ddl/create_indexes.js b/jstests/core/ddl/create_indexes.js
index 6459883846642..3ab29ffb6a40a 100644
--- a/jstests/core/ddl/create_indexes.js
+++ b/jstests/core/ddl/create_indexes.js
@@ -6,12 +6,13 @@
*/
(function() {
'use strict';
+load("jstests/libs/fixture_helpers.js");
const kUnknownIDLFieldError = 40415;
-const isMongos = ("isdbgrid" == db.runCommand("hello").msg);
+const runningOnMongos = FixtureHelpers.isMongos(db);
const extractResult = function(obj) {
- if (!isMongos)
+ if (!runningOnMongos)
return obj;
// Sample mongos format:
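Swapping the hand-rolled 'isdbgrid' check for FixtureHelpers.isMongos(db) keeps topology detection in one helper (jstests/libs/fixture_helpers.js). The usage pattern, with illustrative branch bodies:

load("jstests/libs/fixture_helpers.js");  // defines FixtureHelpers

if (FixtureHelpers.isMongos(db)) {
    // mongos replies nest per-shard results under a 'raw' field.
    jsTestLog("running against mongos");
} else {
    jsTestLog("running against mongod");
}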
diff --git a/jstests/core/ddl/drop3.js b/jstests/core/ddl/drop3.js
index 78d4872a6c01a..7829b9cf3108f 100644
--- a/jstests/core/ddl/drop3.js
+++ b/jstests/core/ddl/drop3.js
@@ -4,8 +4,8 @@
// does_not_support_causal_consistency,
// ]
-t = db.jstests_drop3;
-sub = t.sub;
+let t = db.jstests_drop3;
+let sub = t.sub;
t.drop();
sub.drop();
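The mechanical x = ... to let x = ... rewrites in these legacy tests matter because assigning to an undeclared variable is a ReferenceError once a file runs under strict mode or as an ES module; declaring the variables keeps the tests loadable either way. A tiny illustration:

"use strict";
// t = db.jstests_drop3;     // throws ReferenceError under strict mode / in a module
let t = db.jstests_drop3;    // explicit declaration works in both sloppy and strict code
t.drop();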
diff --git a/jstests/core/ddl/drop_collection.js b/jstests/core/ddl/drop_collection.js
index faf4a2e64d2cf..35df17b10ffe8 100644
--- a/jstests/core/ddl/drop_collection.js
+++ b/jstests/core/ddl/drop_collection.js
@@ -28,9 +28,7 @@ const coll = db[jsTestName() + "_coll"];
jsTest.log("Drop Unexistent collection.");
{
// Drop the collection
- // TODO (SERVER-73934): NamespaceNotFound will be returned by mongod versions earlier than 7.0.
- assert.commandWorkedOrFailedWithCode(db.runCommand({drop: coll.getName()}),
- ErrorCodes.NamespaceNotFound);
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
assertCollectionDropped(coll.getFullName());
}
@@ -45,9 +43,7 @@ jsTest.log("Drop existing collection.");
assertCollectionDropped(coll.getFullName());
// Test idempotency
- // TODO (SERVER-73934): NamespaceNotFound will be returned by mongod versions earlier than 7.0.
- assert.commandWorkedOrFailedWithCode(db.runCommand({drop: coll.getName()}),
- ErrorCodes.NamespaceNotFound);
+ assert.commandWorked(db.runCommand({drop: coll.getName()}));
assertCollectionDropped(coll.getFullName());
}
diff --git a/jstests/core/ddl/drop_index.js b/jstests/core/ddl/drop_index.js
index 3f3e815d47dde..ed11b3a5e4677 100644
--- a/jstests/core/ddl/drop_index.js
+++ b/jstests/core/ddl/drop_index.js
@@ -3,6 +3,7 @@
// @tags: [assumes_no_implicit_index_creation]
(function() {
'use strict';
+load("jstests/libs/fixture_helpers.js");
const t = db.drop_index;
t.drop();
@@ -51,7 +52,7 @@ assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name');
assert.commandWorked(t.dropIndex({b: 1}));
assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern');
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
+const runningOnMongos = FixtureHelpers.isMongos(db);
// Not allowed to drop _id index.
for (const dropIndexArg of ['_id_', {_id: 1}]) {
@@ -59,7 +60,7 @@ for (const dropIndexArg of ['_id_', {_id: 1}]) {
jsTestLog(`Reply to dropIndexes with arg ${tojson(dropIndexArg)}: ${tojson(dropIdIndexReply)}`);
assert.commandFailedWithCode(dropIdIndexReply, ErrorCodes.InvalidOptions);
assert(dropIdIndexReply.hasOwnProperty('errmsg'));
- if (isMongos) {
+ if (runningOnMongos) {
assert(dropIdIndexReply.hasOwnProperty('raw'));
}
}
diff --git a/jstests/core/ddl/index_prepareUnique.js b/jstests/core/ddl/index_prepareUnique.js
index 7e47db9840a2d..fa66168201ca6 100644
--- a/jstests/core/ddl/index_prepareUnique.js
+++ b/jstests/core/ddl/index_prepareUnique.js
@@ -3,14 +3,11 @@
*
* @tags: [assumes_no_implicit_collection_creation_after_drop]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) {
jsTestLog('Skipping test because the collMod unique index feature flag is disabled.');
- return;
+ quit();
}
const collName_prefix = "index_prepareUnique";
@@ -79,5 +76,4 @@ assert.commandWorked(coll4.createIndex({a: 1}, {unique: true, prepareUnique: fal
indexesWithPrepareUnique = coll4.getIndexes().filter(function(doc) {
return friendlyEqual(doc.prepareUnique, true);
});
-assert.eq(0, indexesWithPrepareUnique.length);
-})();
+assert.eq(0, indexesWithPrepareUnique.length);
\ No newline at end of file
diff --git a/jstests/core/ddl/killop_drop_collection.js b/jstests/core/ddl/killop_drop_collection.js
index 17e0154fbfbba..a3df160c99112 100644
--- a/jstests/core/ddl/killop_drop_collection.js
+++ b/jstests/core/ddl/killop_drop_collection.js
@@ -22,7 +22,7 @@ collection.drop();
for (let i = 0; i < 1000; i++) {
assert.commandWorked(collection.insert({x: i}));
}
-assert.commandWorked(collection.createIndex({x: 1}, {background: true}));
+assert.commandWorked(collection.createIndex({x: 1}));
// Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it.
const storageEngine = jsTest.options().storageEngine;
diff --git a/jstests/core/ddl/rename_collection_capped.js b/jstests/core/ddl/rename_collection_capped.js
index 670e5ce611b29..e0eb2efa9c317 100644
--- a/jstests/core/ddl/rename_collection_capped.js
+++ b/jstests/core/ddl/rename_collection_capped.js
@@ -13,9 +13,9 @@
* ]
*/
-a = db.jstests_rename_a;
-b = db.jstests_rename_b;
-c = db.jstests_rename_c;
+let a = db.jstests_rename_a;
+let b = db.jstests_rename_b;
+let c = db.jstests_rename_c;
a.drop();
b.drop();
@@ -26,14 +26,14 @@ c.drop();
// note we use floats to make sure numbers are represented as doubles for SpiderMonkey, since test
// relies on record size
db.createCollection("jstests_rename_a", {capped: true, size: 10000});
-for (i = 0.1; i < 10; ++i) {
+for (let i = 0.1; i < 10; ++i) {
a.save({i: i});
}
assert.commandWorked(
db.adminCommand({renameCollection: "test.jstests_rename_a", to: "test.jstests_rename_b"}));
assert.eq(1, b.countDocuments({i: 9.1}));
printjson(b.stats());
-for (i = 10.1; i < 1000; ++i) {
+for (var i = 10.1; i < 1000; ++i) {
b.save({i: i});
}
printjson(b.stats());
diff --git a/jstests/core/ddl/rename_collection_staytemp.js b/jstests/core/ddl/rename_collection_staytemp.js
index 5db125f1a23db..5107b39962742 100644
--- a/jstests/core/ddl/rename_collection_staytemp.js
+++ b/jstests/core/ddl/rename_collection_staytemp.js
@@ -12,8 +12,8 @@
* ]
*/
-orig = 'rename_stayTemp_orig';
-dest = 'rename_stayTemp_dest';
+let orig = 'rename_stayTemp_orig';
+let dest = 'rename_stayTemp_dest';
db[orig].drop();
db[dest].drop();
diff --git a/jstests/core/delx.js b/jstests/core/delx.js
index 331f6a18b677a..b78e3051f8fdd 100644
--- a/jstests/core/delx.js
+++ b/jstests/core/delx.js
@@ -1,11 +1,11 @@
// @tags: [assumes_against_mongod_not_mongos, requires_getmore, requires_non_retryable_writes]
-a = db.getSiblingDB("delxa");
-b = db.getSiblingDB("delxb");
+let a = db.getSiblingDB("delxa");
+let b = db.getSiblingDB("delxb");
function setup(mydb) {
mydb.dropDatabase();
- for (i = 0; i < 100; i++) {
+ for (let i = 0; i < 100; i++) {
mydb.foo.insert({_id: i});
}
}
@@ -16,8 +16,8 @@ setup(b);
assert.eq(100, a.foo.find().itcount(), "A1");
assert.eq(100, b.foo.find().itcount(), "A2");
-x = a.foo.find().sort({_id: 1}).batchSize(60);
-y = b.foo.find().sort({_id: 1}).batchSize(60);
+let x = a.foo.find().sort({_id: 1}).batchSize(60);
+let y = b.foo.find().sort({_id: 1}).batchSize(60);
x.next();
y.next();
@@ -27,7 +27,7 @@ a.foo.remove({_id: {$gt: 50}});
assert.eq(51, a.foo.find().itcount(), "B1");
assert.eq(100, b.foo.find().itcount(), "B2");
-xCount = x.itcount();
+let xCount = x.itcount();
assert(xCount == 59 || xCount == 99, "C1 : " + xCount); // snapshot or not is ok
assert.eq(
99,
diff --git a/jstests/core/doc_validation/bypass_doc_validation.js b/jstests/core/doc_validation/bypass_doc_validation.js
index 4ba40069ee9c0..dff3a22847e87 100644
--- a/jstests/core/doc_validation/bypass_doc_validation.js
+++ b/jstests/core/doc_validation/bypass_doc_validation.js
@@ -12,6 +12,7 @@
// tenant_migration_incompatible,
// # This test has statements that do not support non-local read concern.
// does_not_support_causal_consistency,
+// references_foreign_collection,
// ]
/**
diff --git a/jstests/core/embedded_dollar_prefixed_field_validation.js b/jstests/core/embedded_dollar_prefixed_field_validation.js
new file mode 100644
index 0000000000000..2e58ccd057ebd
--- /dev/null
+++ b/jstests/core/embedded_dollar_prefixed_field_validation.js
@@ -0,0 +1,61 @@
+/**
+ * SERVER-75880 Test that _id cannot be an object with a deeply nested element that has a $-prefixed
+ * field name.
+ *
+ * @tags: [
+ * assumes_unsharded_collection,
+ * requires_fcv_71,
+ * ]
+ */
+(function() {
+"use strict";
+
+const coll = db.field_name_validation;
+coll.drop();
+
+// Insert command field name validation
+assert.writeErrorWithCode(coll.insert({_id: {a: {$b: 1}}, x: 1}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+// Update commands with $set
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.writeErrorWithCode(coll.update({"_id.a.$b": 1}, {$set: {x: 1}}, {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(coll.update({x: 1}, {$set: {_id: {a: {$b: 1}}}}, {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(coll.update({a: 1}, {$set: {_id: {a: {$b: 1}}}}, {upsert: true}),
+ [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.ImmutableField]);
+
+// Replacement-style updates
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.writeErrorWithCode(coll.update({_id: 0}, {_id: {a: {$b: 1}}}),
+ ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(coll.update({"_id.a.$b": 1}, {_id: {a: {$b: 1}}}, {upsert: true}),
+ ErrorCodes.NotExactValueField);
+assert.writeErrorWithCode(coll.update({_id: {a: {$b: 1}}}, {_id: {a: {$b: 1}}}, {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+// Pipeline-style updates with $replaceWith
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.writeErrorWithCode(coll.update({_id: 0}, [{$replaceWith: {$literal: {_id: {a: {$b: 1}}}}}]),
+ ErrorCodes.DollarPrefixedFieldName);
+assert.writeErrorWithCode(
+ coll.update({"_id.a.$b": 1}, [{$replaceWith: {$literal: {a: {$a: 1}}}}], {upsert: true}),
+ ErrorCodes.DollarPrefixedFieldName);
+
+// FindAndModify field name validation
+coll.drop();
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.throwsWithCode(() => {
+ coll.findAndModify({query: {_id: 0}, update: {_id: {a: {$b: 1}}}});
+}, ErrorCodes.DollarPrefixedFieldName);
+assert.throwsWithCode(() => {
+ coll.findAndModify({query: {"_id.a.$b": 1}, update: {_id: {a: {$b: 1}}}, upsert: true});
+}, ErrorCodes.NotExactValueField);
+assert.throwsWithCode(() => {
+ coll.findAndModify({query: {_id: {a: {$b: 1}}}, update: {_id: {a: {$b: 1}}}, upsert: true});
+}, ErrorCodes.DollarPrefixedFieldName);
+})();
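The new test drives the server-side DollarPrefixedFieldName checks through insert, update, and findAndModify. A client that wants to fail fast could pre-check a candidate _id with a small recursive walk; a hypothetical helper (not part of the test, and no substitute for the server-side validation) might look like:

// Hypothetical pre-check for $-prefixed names anywhere inside a value.
function hasDollarPrefixedField(value) {
    if (value === null || typeof value !== "object") {
        return false;
    }
    return Object.keys(value).some(
        (key) => key.startsWith("$") || hasDollarPrefixedField(value[key]));
}

assert(hasDollarPrefixedField({a: {$b: 1}}));
assert(!hasDollarPrefixedField({a: {b: 1}}));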
diff --git a/jstests/core/error5.js b/jstests/core/error5.js
index edcfa059d25bf..fe4efef15e3bb 100644
--- a/jstests/core/error5.js
+++ b/jstests/core/error5.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.error5;
+let t = db.error5;
t.drop();
assert.throws(function() {
diff --git a/jstests/core/exhaust.js b/jstests/core/exhaust.js
index 19b6c04d0fc5d..a2e9028bdd1dd 100644
--- a/jstests/core/exhaust.js
+++ b/jstests/core/exhaust.js
@@ -3,6 +3,7 @@
// # This test uses exhaust which does not use runCommand (required by the inject_tenant_prefix.js
// # override).
// tenant_migration_incompatible,
+//   no_selinux,
// ]
(function() {
diff --git a/jstests/core/find_with_resume_after_param.js b/jstests/core/find_with_resume_after_param.js
new file mode 100644
index 0000000000000..628a9e3a86f58
--- /dev/null
+++ b/jstests/core/find_with_resume_after_param.js
@@ -0,0 +1,133 @@
+/**
+ * Tests that the internal parameter "$_resumeAfter" validates the type of the 'recordId' for
+ * clustered and non-clustered collections.
+ * @tags: [
+ * # Queries on mongoS may not request or provide a resume token.
+ * assumes_against_mongod_not_mongos,
+ * ]
+ */
+
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+const clustered = db.clusteredColl;
+const nonClustered = db.normalColl;
+const clusteredName = clustered.getName();
+const nonClusteredName = nonClustered.getName();
+
+assertDropCollection(db, clusteredName);
+assertDropCollection(db, nonClusteredName);
+
+db.createCollection(clusteredName, {clusteredIndex: {key: {_id: 1}, unique: true}});
+db.createCollection(nonClusteredName);
+
+// Insert some documents.
+const docs = [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}];
+assert.commandWorked(clustered.insertMany(docs));
+assert.commandWorked(nonClustered.insertMany(docs));
+
+function validateFailedResumeAfterInFind({collName, resumeAfterSpec, errorCode, explainFail}) {
+ const spec = {
+ find: collName,
+ filter: {},
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeAfterSpec,
+ hint: {$natural: 1}
+ };
+ assert.commandFailedWithCode(db.runCommand(spec), errorCode);
+ // Run the same query under an explain.
+ if (explainFail) {
+ assert.commandFailedWithCode(db.runCommand({explain: spec}), errorCode);
+ } else {
+ assert.commandWorked(db.runCommand({explain: spec}));
+ }
+}
+
+function validateFailedResumeAfterInAggregate({collName, resumeAfterSpec, errorCode, explainFail}) {
+ const spec = {
+ aggregate: collName,
+ pipeline: [],
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeAfterSpec,
+ hint: {$natural: 1},
+ cursor: {}
+ };
+ assert.commandFailedWithCode(db.runCommand(spec), errorCode);
+ // Run the same query under an explain.
+ if (explainFail) {
+ assert.commandFailedWithCode(db.runCommand({explain: spec}), errorCode);
+ } else {
+ assert.commandWorked(db.runCommand({explain: spec}));
+ }
+}
+
+function testResumeAfter(validateFunction) {
+ // Confirm $_resumeAfter will fail for clustered collections if the recordId is Long.
+ validateFunction({
+ collName: clusteredName,
+ resumeAfterSpec: {'$recordId': NumberLong(2)},
+ errorCode: 7738600,
+ explainFail: true
+ });
+
+    // Confirm $_resumeAfter will fail with 'KeyNotFound' if given a non-existent recordId.
+ validateFunction({
+ collName: clusteredName,
+ resumeAfterSpec: {'$recordId': BinData(5, '1234')},
+ errorCode: ErrorCodes.KeyNotFound
+ });
+
+    // Confirm $_resumeAfter will fail for non-clustered collections if the recordId is BinData.
+ validateFunction({
+ collName: nonClusteredName,
+ resumeAfterSpec: {'$recordId': BinData(5, '1234')},
+ errorCode: 7738600,
+ explainFail: true
+ });
+
+    // Confirm $_resumeAfter will fail with 'KeyNotFound' if given a non-existent recordId.
+ validateFunction({
+ collName: nonClusteredName,
+ resumeAfterSpec: {'$recordId': NumberLong(8)},
+ errorCode: ErrorCodes.KeyNotFound
+ });
+
+ if (checkSBEEnabled(db)) {
+ // This case really means that 'forceClassicEngine' has not been set. It does not mean any
+ // SBE-specific feature flags are turned on.
+ validateFunction({
+ collName: nonClusteredName,
+ resumeAfterSpec: {'$recordId': null},
+ errorCode: ErrorCodes.KeyNotFound
+ });
+ } else {
+ assert.commandWorked(db.runCommand({
+ find: nonClusteredName,
+ filter: {},
+ $_requestResumeToken: true,
+ $_resumeAfter: {'$recordId': null},
+ hint: {$natural: 1}
+ }));
+ }
+
+    // Confirm $_resumeAfter will fail to parse if the collection does not exist.
+ validateFunction({
+ collName: "random",
+ resumeAfterSpec: {'$recordId': null, "anotherField": null},
+ errorCode: ErrorCodes.BadValue,
+ explainFail: true
+ });
+ validateFunction({
+ collName: "random",
+ resumeAfterSpec: "string",
+ errorCode: ErrorCodes.TypeMismatch,
+ explainFail: true
+ });
+}
+
+testResumeAfter(validateFailedResumeAfterInFind);
+// TODO(SERVER-77873): remove "featureFlagReshardingImprovements"
+if (FeatureFlagUtil.isPresentAndEnabled(db, "ReshardingImprovements")) {
+ testResumeAfter(validateFailedResumeAfterInAggregate);
+}
\ No newline at end of file
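For context, the parameters exercised here back resumable natural-order scans: a first batch is requested with $_requestResumeToken: true and a {$natural: 1} hint, and the postBatchResumeToken from the reply is passed back through $_resumeAfter to continue from where the scan stopped. A hedged sketch of the happy path (collection name is illustrative, and the exact token shape is an implementation detail of the server):

const coll = db.resume_scan_example;  // illustrative name
assert.commandWorked(coll.insertMany([{_id: 1}, {_id: 2}, {_id: 3}]));

// First batch: ask the server to report a resume token.
const first = assert.commandWorked(db.runCommand({
    find: coll.getName(),
    filter: {},
    hint: {$natural: 1},
    batchSize: 1,
    $_requestResumeToken: true,
}));
const token = first.cursor.postBatchResumeToken;  // e.g. {$recordId: ...}

// Later: resume the scan after the record the previous batch ended on.
assert.commandWorked(db.runCommand({
    find: coll.getName(),
    filter: {},
    hint: {$natural: 1},
    batchSize: 1,
    $_requestResumeToken: true,
    $_resumeAfter: token,
}));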
diff --git a/jstests/core/geo_parse_err.js b/jstests/core/geo_parse_err.js
index 73bc451bd7ccd..a03d194932a68 100644
--- a/jstests/core/geo_parse_err.js
+++ b/jstests/core/geo_parse_err.js
@@ -121,4 +121,4 @@ err = t.insert({
});
assert.includes(err.getWriteError().errmsg,
"Element 1 of \"geometries\" must be an object, instead got type double:");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/hashed_partial_and_sparse_index.js b/jstests/core/hashed_partial_and_sparse_index.js
index 4c57f3e1eee38..5036d999bbae8 100644
--- a/jstests/core/hashed_partial_and_sparse_index.js
+++ b/jstests/core/hashed_partial_and_sparse_index.js
@@ -6,11 +6,8 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const coll = db.hashed_partial_index;
coll.drop();
@@ -91,5 +88,4 @@ testSparseHashedIndex({a: "hashed", b: 1});
// Verify that index is used if the query predicate matches the 'partialFilterExpression'.
validateFindCmdOutputAndPlan(
{filter: {b: 6}, expectedOutput: [{a: 1, b: 6}], expectedStages: ["IXSCAN", "FETCH"]});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/hint1.js b/jstests/core/hint1.js
index d584144e64170..09d328350c3da 100644
--- a/jstests/core/hint1.js
+++ b/jstests/core/hint1.js
@@ -1,4 +1,4 @@
-p = db.jstests_hint1;
+let p = db.jstests_hint1;
p.drop();
p.save({ts: new Date(1), cls: "entry", verticals: "alleyinsider", live: true});
diff --git a/jstests/core/id1.js b/jstests/core/id1.js
index 79e26e33e90e2..a3fbaea3bfcb8 100644
--- a/jstests/core/id1.js
+++ b/jstests/core/id1.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.id1;
+let t = db.id1;
t.drop();
t.save({_id: {a: 1, b: 2}, x: "a"});
diff --git a/jstests/core/index/bindata_indexonly.js b/jstests/core/index/bindata_indexonly.js
index 3fc83d0f2fd6e..d9386370672ad 100644
--- a/jstests/core/index/bindata_indexonly.js
+++ b/jstests/core/index/bindata_indexonly.js
@@ -6,10 +6,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.jstests_bindata_indexonly;
@@ -73,5 +70,4 @@ assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only");
assert.eq(
2, explain.executionStats.nReturned, "correctcount.$gte.3 - not returning correct documents");
-coll.drop();
-})();
+coll.drop();
\ No newline at end of file
diff --git a/jstests/core/index/covered/coveredIndex1.js b/jstests/core/index/covered/coveredIndex1.js
index 0ea6b523a26fa..ea4c223fa85e3 100644
--- a/jstests/core/index/covered/coveredIndex1.js
+++ b/jstests/core/index/covered/coveredIndex1.js
@@ -11,15 +11,12 @@
* assumes_no_implicit_index_creation,
* ]
*/
-(function() {
-"use strict";
+// Include helpers for analyzing explain output.
+import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js";
const coll = db["jstests_coveredIndex1"];
coll.drop();
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
assert.commandWorked(coll.insert({order: 0, fn: "john", ln: "doe"}));
assert.commandWorked(coll.insert({order: 1, fn: "jack", ln: "doe"}));
assert.commandWorked(coll.insert({order: 2, fn: "john", ln: "smith"}));
@@ -90,4 +87,3 @@ assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true);
assert.commandWorked(coll.dropIndex({obj: 1}));
assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1}));
assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false);
-}());
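The analyze_plan helpers are now named module exports, so each covered-index test imports only what it inspects. A compact sketch of the covered-query assertion these tests repeat (collection and index are illustrative):

import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js";

const coll = db.covered_query_example;  // illustrative name
coll.drop();
assert.commandWorked(coll.insert({order: 0, fn: "john", ln: "doe"}));
assert.commandWorked(coll.createIndex({ln: 1, fn: 1}));

// Project only indexed fields and exclude _id so the plan can stay index-only.
const explain = coll.find({ln: "doe"}, {ln: 1, fn: 1, _id: 0}).explain();
assert(isIndexOnly(db, getWinningPlan(explain.queryPlanner)),
       "expected a covered plan: " + tojson(explain));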
diff --git a/jstests/core/index/covered/coveredIndex2.js b/jstests/core/index/covered/coveredIndex2.js
index 72724dede1fab..e3fb2a5936007 100644
--- a/jstests/core/index/covered/coveredIndex2.js
+++ b/jstests/core/index/covered/coveredIndex2.js
@@ -8,15 +8,12 @@
// # plans.
// assumes_no_implicit_index_creation,
// ]
-(function() {
-"use strict";
+// Include helpers for analyzing explain output.
+import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js";
const t = db["jstests_coveredIndex2"];
t.drop();
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
assert.commandWorked(t.insert({a: 1}));
assert.commandWorked(t.insert({a: 2}));
assert.eq(t.findOne({a: 1}).a, 1, "Cannot find right record");
@@ -38,4 +35,3 @@ assert.commandWorked(t.insert({a: [3, 4]}));
plan = t.find({a: 1}, {a: 1, _id: 0}).explain();
assert(!isIndexOnly(db, getWinningPlan(plan.queryPlanner)),
"Find is using covered index even after multikey insert");
-}());
diff --git a/jstests/core/index/covered/coveredIndex3.js b/jstests/core/index/covered/coveredIndex3.js
index 8b15b40e86259..dd44f2544eaef 100644
--- a/jstests/core/index/covered/coveredIndex3.js
+++ b/jstests/core/index/covered/coveredIndex3.js
@@ -6,8 +6,8 @@
if (0) { // SERVER-4975
- t = db.jstests_coveredIndex3;
- t2 = db.jstests_coveredIndex3_other;
+ let t = db.jstests_coveredIndex3;
+ let t2 = db.jstests_coveredIndex3_other;
t.drop();
t2.drop();
@@ -15,30 +15,30 @@ if (0) { // SERVER-4975
// Insert an array, which will make the { a:1 } index multikey and should disable covered
// index
// matching.
- p1 = startParallelShell(
+ let p1 = startParallelShell(
'for( i = 0; i < 60; ++i ) { \
db.jstests_coveredIndex3.save( { a:[ 2000, 2001 ] } ); \
sleep( 300 ); \
}');
// Frequent writes cause the find operation to yield.
- p2 = startParallelShell(
+ let p2 = startParallelShell(
'for( i = 0; i < 1800; ++i ) { \
db.jstests_coveredIndex3_other.save( {} ); \
sleep( 10 ); \
}');
- for (i = 0; i < 30; ++i) {
+ for (let i = 0; i < 30; ++i) {
t.drop();
t.createIndex({a: 1});
- for (j = 0; j < 1000; ++j) {
+ for (let j = 0; j < 1000; ++j) {
t.save({a: j});
}
- c = t.find({}, {_id: 0, a: 1}).hint({a: 1}).batchSize(batchSize);
+ let c = t.find({}, {_id: 0, a: 1}).hint({a: 1}).batchSize(batchSize);
while (c.hasNext()) {
- o = c.next();
+ let o = c.next();
// If o contains a high numeric 'a' value, it must come from an array saved in p1.
assert(!(o.a > 1500), 'improper object returned ' + tojson(o));
}
diff --git a/jstests/core/index/covered/coveredIndex4.js b/jstests/core/index/covered/coveredIndex4.js
index 7433ed7b9d60f..b415328474e0f 100644
--- a/jstests/core/index/covered/coveredIndex4.js
+++ b/jstests/core/index/covered/coveredIndex4.js
@@ -5,14 +5,14 @@
// Test covered index projection with $or clause, specifically in getMore
// SERVER-4980
-t = db.jstests_coveredIndex4;
+let t = db.jstests_coveredIndex4;
t.drop();
t.createIndex({a: 1});
t.createIndex({b: 1});
-orClause = [];
-for (i = 0; i < 200; ++i) {
+let orClause = [];
+for (let i = 0; i < 200; ++i) {
if (i % 2 == 0) {
t.save({a: i});
orClause.push({a: i});
@@ -22,11 +22,11 @@ for (i = 0; i < 200; ++i) {
}
}
-c = t.find({$or: orClause}, {_id: 0, a: 1});
+let c = t.find({$or: orClause}, {_id: 0, a: 1});
// No odd values of a were saved, so we should not see any in the results.
while (c.hasNext()) {
- o = c.next();
+ let o = c.next();
if (o.a) {
assert.eq(0, o.a % 2, 'unexpected result: ' + tojson(o));
}
@@ -36,7 +36,7 @@ c = t.find({$or: orClause}, {_id: 0, b: 1});
// No even values of b were saved, so we should not see any in the results.
while (c.hasNext()) {
- o = c.next();
+ let o = c.next();
if (o.b) {
assert.eq(1, o.b % 2, 'unexpected result: ' + tojson(o));
}
diff --git a/jstests/core/index/covered/covered_index_compound_1.js b/jstests/core/index/covered/covered_index_compound_1.js
index 0efff371059a1..27da21d5cebb9 100644
--- a/jstests/core/index/covered/covered_index_compound_1.js
+++ b/jstests/core/index/covered/covered_index_compound_1.js
@@ -8,11 +8,11 @@
// Compound index covered query tests
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_compound_1");
coll.drop();
-for (i = 0; i < 100; i++) {
+for (let i = 0; i < 100; i++) {
coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)});
}
coll.createIndex({a: 1, b: -1, c: 1});
diff --git a/jstests/core/index/covered/covered_index_negative_1.js b/jstests/core/index/covered/covered_index_negative_1.js
index 2e2179d908a9d..826fa19e37ad1 100644
--- a/jstests/core/index/covered/covered_index_negative_1.js
+++ b/jstests/core/index/covered/covered_index_negative_1.js
@@ -8,10 +8,7 @@
// assumes_balancer_off,
// does_not_support_stepdowns,
// ]
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js";
const coll = db.covered_negative_1;
coll.drop();
@@ -95,5 +92,4 @@ assert.neq(0,
plan.executionStats.totalDocsExamined,
"negative.1.8 - nscannedObjects should not be 0 for a non covered query");
-print('all tests passed');
-})();
+print('all tests passed');
\ No newline at end of file
diff --git a/jstests/core/index/covered/covered_index_simple_1.js b/jstests/core/index/covered/covered_index_simple_1.js
index 16a6e3cdc6a2b..688b4c4b9a00d 100644
--- a/jstests/core/index/covered/covered_index_simple_1.js
+++ b/jstests/core/index/covered/covered_index_simple_1.js
@@ -8,17 +8,17 @@
// Simple covered index query test
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_simple_1");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
-for (i = 0; i < 5; i++) {
+for (let i = 0; i < 5; i++) {
coll.insert({bar: i});
}
coll.insert({foo: "string"});
diff --git a/jstests/core/index/covered/covered_index_simple_2.js b/jstests/core/index/covered/covered_index_simple_2.js
index cf04f940ad0d9..fa59e70caeb4f 100644
--- a/jstests/core/index/covered/covered_index_simple_2.js
+++ b/jstests/core/index/covered/covered_index_simple_2.js
@@ -8,11 +8,11 @@
// Simple covered index query test with unique index
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_simple_2");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
coll.insert({foo: "string"});
diff --git a/jstests/core/index/covered/covered_index_simple_3.js b/jstests/core/index/covered/covered_index_simple_3.js
index 4beff2b3c5a50..1bc1070be9f79 100644
--- a/jstests/core/index/covered/covered_index_simple_3.js
+++ b/jstests/core/index/covered/covered_index_simple_3.js
@@ -8,14 +8,14 @@
// Simple covered index query test with a unique sparse index
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_simple_3");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
-for (i = 0; i < 5; i++) {
+for (let i = 0; i < 5; i++) {
coll.insert({bar: i});
}
coll.insert({foo: "string"});
diff --git a/jstests/core/index/covered/covered_index_simple_id.js b/jstests/core/index/covered/covered_index_simple_id.js
index 59efcd7f13aec..818398433c418 100644
--- a/jstests/core/index/covered/covered_index_simple_id.js
+++ b/jstests/core/index/covered/covered_index_simple_id.js
@@ -5,11 +5,11 @@
// Simple covered index query test
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_simple_id");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({_id: i});
}
coll.insert({_id: "string"});
diff --git a/jstests/core/index/covered/covered_index_sort_1.js b/jstests/core/index/covered/covered_index_sort_1.js
index 499bff128e7ec..e08e9061df660 100644
--- a/jstests/core/index/covered/covered_index_sort_1.js
+++ b/jstests/core/index/covered/covered_index_sort_1.js
@@ -8,17 +8,17 @@
// Simple covered index query test with sort
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_sort_1");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({foo: i});
}
-for (i = 0; i < 5; i++) {
+for (let i = 0; i < 5; i++) {
coll.insert({bar: i});
}
coll.insert({foo: "1"});
diff --git a/jstests/core/index/covered/covered_index_sort_2.js b/jstests/core/index/covered/covered_index_sort_2.js
index 736a48bb4526e..a2acff2ec5cce 100644
--- a/jstests/core/index/covered/covered_index_sort_2.js
+++ b/jstests/core/index/covered/covered_index_sort_2.js
@@ -5,11 +5,11 @@
// ]
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_sort_2");
coll.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
coll.insert({_id: i});
}
coll.insert({_id: "1"});
diff --git a/jstests/core/index/covered/covered_index_sort_3.js b/jstests/core/index/covered/covered_index_sort_3.js
index 9e57a79a2a2e6..1066103162531 100644
--- a/jstests/core/index/covered/covered_index_sort_3.js
+++ b/jstests/core/index/covered/covered_index_sort_3.js
@@ -8,11 +8,11 @@
// Compound index covered query tests with sort
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.getCollection("covered_sort_3");
coll.drop();
-for (i = 0; i < 100; i++) {
+for (let i = 0; i < 100; i++) {
coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)});
}
diff --git a/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js b/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js
index 416549acb8753..ad2d11ecf8248 100644
--- a/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js
+++ b/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js
@@ -9,10 +9,7 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIndexOnly, planHasStage} from "jstests/libs/analyze_plan.js";
const collName = "covered_index_sort_no_fetch_optimization";
const coll = db.getCollection(collName);
@@ -239,5 +236,4 @@ findCmd = {
};
expected =
[{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}];
-assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
-})();
+assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort);
\ No newline at end of file
diff --git a/jstests/core/index/elemmatch_index.js b/jstests/core/index/elemmatch_index.js
index a1941620a484a..b8012988a30d7 100644
--- a/jstests/core/index/elemmatch_index.js
+++ b/jstests/core/index/elemmatch_index.js
@@ -6,10 +6,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
const coll = db.elemMatch_index;
coll.drop();
@@ -111,5 +108,4 @@ assert.eq(count, 1);
const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats");
assert.commandWorked(explain);
assert.eq(count, explain.executionStats.totalKeysExamined, explain);
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/index/fts/fts2.js b/jstests/core/index/fts/fts2.js
index 79be057fed835..418a8ca321529 100644
--- a/jstests/core/index/fts/fts2.js
+++ b/jstests/core/index/fts/fts2.js
@@ -1,6 +1,6 @@
load("jstests/libs/fts.js");
-t = db.text2;
+let t = db.text2;
t.drop();
t.save({_id: 1, x: "az b x", y: "c d m", z: 1});
diff --git a/jstests/core/index/fts/fts3.js b/jstests/core/index/fts/fts3.js
index 9b89cda029c64..b28227e57bbe9 100644
--- a/jstests/core/index/fts/fts3.js
+++ b/jstests/core/index/fts/fts3.js
@@ -1,6 +1,6 @@
load("jstests/libs/fts.js");
-t = db.text3;
+let t = db.text3;
t.drop();
t.save({_id: 1, x: "az b x", y: "c d m", z: 1});
diff --git a/jstests/core/index/fts/fts4.js b/jstests/core/index/fts/fts4.js
index bb19fba22211b..3e92b6f057458 100644
--- a/jstests/core/index/fts/fts4.js
+++ b/jstests/core/index/fts/fts4.js
@@ -1,6 +1,6 @@
load("jstests/libs/fts.js");
-t = db.text4;
+let t = db.text4;
t.drop();
t.save({_id: 1, x: ["az", "b", "x"], y: ["c", "d", "m"], z: 1});
diff --git a/jstests/core/index/fts/fts5.js b/jstests/core/index/fts/fts5.js
index 28d9b48d957ea..13bde38545b34 100644
--- a/jstests/core/index/fts/fts5.js
+++ b/jstests/core/index/fts/fts5.js
@@ -1,6 +1,6 @@
load("jstests/libs/fts.js");
-t = db.text5;
+let t = db.text5;
t.drop();
t.save({_id: 1, x: [{a: "az"}, {a: "b"}, {a: "x"}], y: ["c", "d", "m"], z: 1});
diff --git a/jstests/core/index/fts/fts_blog.js b/jstests/core/index/fts/fts_blog.js
index 5208c16625846..5b7b7559512e6 100644
--- a/jstests/core/index/fts/fts_blog.js
+++ b/jstests/core/index/fts/fts_blog.js
@@ -1,4 +1,4 @@
-t = db.text_blog;
+let t = db.text_blog;
t.drop();
t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay"});
@@ -9,7 +9,7 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing.
// specify weights if you want a field to be more meaningull
t.createIndex({"title": "text", text: "text"}, {weights: {title: 10}});
-res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({
+let res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({
score: {"$meta": "textScore"}
});
assert.eq(3, res.length());
diff --git a/jstests/core/index/fts/fts_blogwild.js b/jstests/core/index/fts/fts_blogwild.js
index f449b6b827e89..ffb28533e117a 100644
--- a/jstests/core/index/fts/fts_blogwild.js
+++ b/jstests/core/index/fts/fts_blogwild.js
@@ -4,7 +4,7 @@
// assumes_no_implicit_index_creation,
// ]
-t = db.text_blogwild;
+let t = db.text_blogwild;
t.drop();
t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay eliot"});
@@ -21,7 +21,7 @@ t.createIndex({dummy: "text"}, {weights: "$**"});
// ensure listIndexes can handle a string-valued "weights"
assert.eq(2, t.getIndexes().length);
-res = t.find({"$text": {"$search": "blog"}});
+let res = t.find({"$text": {"$search": "blog"}});
assert.eq(3, res.length(), "A1");
res = t.find({"$text": {"$search": "write"}});
diff --git a/jstests/core/index/fts/fts_index.js b/jstests/core/index/fts/fts_index.js
index c78301509f15b..be5035d0babbb 100644
--- a/jstests/core/index/fts/fts_index.js
+++ b/jstests/core/index/fts/fts_index.js
@@ -78,6 +78,17 @@ assert.eq(0,
})
.length);
+// $-prefixed fields cannot be indexed.
+coll = db.getCollection(collNamePrefix + collCount++);
+coll.drop();
+assert.commandFailed(coll.createIndex({"a.$custom": "text"}, {name: indexName}));
+assert.eq(0,
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
+
// SERVER-19519 Spec fails if '_fts' is specified on a non-text index.
coll = db.getCollection(collNamePrefix + collCount++);
coll.drop();
@@ -141,7 +152,7 @@ assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
assert.eq(2, coll.getIndexes().length);
assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
assert.eq(2, coll.getIndexes().length);
-assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}, {background: true}));
+assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}));
assert.eq(2, coll.getIndexes().length);
assert.commandFailedWithCode(coll.createIndex({a: 1, b: 1, c: "text"}),
ErrorCodes.CannotCreateIndex);
diff --git a/jstests/core/index/fts/fts_index3.js b/jstests/core/index/fts/fts_index3.js
index ac4730d0bd0a7..4d0efb2c1d949 100644
--- a/jstests/core/index/fts/fts_index3.js
+++ b/jstests/core/index/fts/fts_index3.js
@@ -110,6 +110,11 @@ assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
+// SERVER-78238: an index on a dotted path should not index fields whose names contain a literal
+// dot and therefore only look like a dotted path.
+assert.commandWorked(coll.insert({"a.b": "ignored"}));
+assert.eq(0, coll.find({$text: {$search: "ignored"}}).itcount());
+
// 10) Same as #9, but with a wildcard text index.
coll = db.getCollection(collNamePrefix + collCount++);
coll.drop();
@@ -121,6 +126,11 @@ assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}}));
assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount());
assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount());
+// SERVER-78238: a wildcard index should not index fields whose names contain a dot or start with '$'.
+assert.commandWorked(coll.insert({"a.b": "ignored"}));
+assert.commandWorked(coll.insert({"$personal": "ignored"}));
+assert.eq(0, coll.find({$text: {$search: "ignored"}}).itcount());
+
// 11) Create a text index on a single field with a custom language override, insert a document,
// update the language of the document (so as to change the stemming), and verify that $text with
// the new language returns the document.
diff --git a/jstests/core/index/fts/fts_partition1.js b/jstests/core/index/fts/fts_partition1.js
index 4a26a3ad62957..3791c461f7ddf 100644
--- a/jstests/core/index/fts/fts_partition1.js
+++ b/jstests/core/index/fts/fts_partition1.js
@@ -1,6 +1,6 @@
load("jstests/libs/fts.js");
-t = db.text_parition1;
+let t = db.text_parition1;
t.drop();
t.insert({_id: 1, x: 1, y: "foo"});
@@ -19,7 +19,7 @@ assert.throws(function() {
assert.eq([1], queryIDS(t, "foo", {x: 1}));
-res = t.find({"$text": {"$search": "foo"}, x: 1}, {score: {"$meta": "textScore"}});
+let res = t.find({"$text": {"$search": "foo"}, x: 1}, {score: {"$meta": "textScore"}});
assert(res[0].score > 0, tojson(res.toArray()));
// repeat "search" with "language" specified, SERVER-8999
diff --git a/jstests/core/index/fts/fts_partition_no_multikey.js b/jstests/core/index/fts/fts_partition_no_multikey.js
index b819c3abfbd83..c8adead6100c6 100644
--- a/jstests/core/index/fts/fts_partition_no_multikey.js
+++ b/jstests/core/index/fts/fts_partition_no_multikey.js
@@ -1,4 +1,4 @@
-t = db.fts_partition_no_multikey;
+let t = db.fts_partition_no_multikey;
t.drop();
t.createIndex({x: 1, y: "text"});
diff --git a/jstests/core/index/fts/fts_phrase.js b/jstests/core/index/fts/fts_phrase.js
index 1a3d747032682..31356e8a4ea05 100644
--- a/jstests/core/index/fts/fts_phrase.js
+++ b/jstests/core/index/fts/fts_phrase.js
@@ -1,4 +1,4 @@
-t = db.text_phrase;
+let t = db.text_phrase;
t.drop();
t.save({_id: 1, title: "my blog post", text: "i am writing a blog. yay"});
@@ -7,7 +7,7 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing.
t.createIndex({"title": "text", text: "text"}, {weights: {title: 10}});
-res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}}).sort({
+let res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}}).sort({
score: {"$meta": "textScore"}
});
assert.eq(3, res.length());
diff --git a/jstests/core/index/fts/fts_projection.js b/jstests/core/index/fts/fts_projection.js
index 6e4c99b92308d..67e69179e8549 100644
--- a/jstests/core/index/fts/fts_projection.js
+++ b/jstests/core/index/fts/fts_projection.js
@@ -3,10 +3,7 @@
// assumes_read_concern_local,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
var t = db.getSiblingDB("test").getCollection("fts_projection");
t.drop();
@@ -121,5 +118,4 @@ assert.eq(results.length, 2);
assert(results[0].score,
"invalid text score for " + tojson(results[0], '', true) + " when $text is in $or");
assert(results[1].score,
- "invalid text score for " + tojson(results[0], '', true) + " when $text is in $or");
-})();
+ "invalid text score for " + tojson(results[0], '', true) + " when $text is in $or");
\ No newline at end of file
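fts_projection.js keeps exercising the standard textScore metadata pattern: project a field with {$meta: "textScore"} and sort on the same expression. A minimal reference sketch with an illustrative collection:

const coll = db.text_score_example;  // illustrative name
coll.drop();
assert.commandWorked(coll.insert({_id: 1, text: "mongodb text search"}));
assert.commandWorked(coll.createIndex({text: "text"}));

const results = coll.find({$text: {$search: "search"}}, {score: {$meta: "textScore"}})
                    .sort({score: {$meta: "textScore"}})
                    .toArray();
assert.gt(results[0].score, 0, tojson(results));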
diff --git a/jstests/core/index/geo/geo1.js b/jstests/core/index/geo/geo1.js
index 8a80f59d69237..126aae3505d82 100644
--- a/jstests/core/index/geo/geo1.js
+++ b/jstests/core/index/geo/geo1.js
@@ -5,13 +5,10 @@
// requires_fastcount,
// ]
-t = db.geo1;
+let t = db.geo1;
t.drop();
-idx = {
- loc: "2d",
- zip: 1
-};
+let idx = {loc: "2d", zip: 1};
t.insert({zip: "06525", loc: [41.352964, 73.01212]});
t.insert({zip: "10024", loc: [40.786387, 73.97709]});
@@ -31,7 +28,7 @@ assert.eq(3, t.count(), "B3");
// test normal access
-wb = t.findOne({zip: "06525"});
+let wb = t.findOne({zip: "06525"});
assert(wb, "C1");
assert.eq("06525", t.find({loc: wb.loc}).hint({"$natural": 1})[0].zip, "C2");
diff --git a/jstests/core/index/geo/geo10.js b/jstests/core/index/geo/geo10.js
index 45463711406f5..adbf0862cbedc 100644
--- a/jstests/core/index/geo/geo10.js
+++ b/jstests/core/index/geo/geo10.js
@@ -6,7 +6,7 @@
// Test for SERVER-2746
-coll = db.geo10;
+let coll = db.geo10;
coll.drop();
assert.commandWorked(db.geo10.createIndex({c: '2d', t: 1}, {min: 0, max: Math.pow(2, 40)}));
diff --git a/jstests/core/index/geo/geo2.js b/jstests/core/index/geo/geo2.js
index 34588acac9e43..e4521b5b45e00 100644
--- a/jstests/core/index/geo/geo2.js
+++ b/jstests/core/index/geo/geo2.js
@@ -3,11 +3,11 @@
// requires_fastcount,
// ]
-t = db.geo2;
+let t = db.geo2;
t.drop();
-n = 1;
-arr = [];
+let n = 1;
+let arr = [];
for (var x = -100; x < 100; x += 2) {
for (var y = -100; y < 100; y += 2) {
arr.push({_id: n++, loc: [x, y]});
diff --git a/jstests/core/index/geo/geo3.js b/jstests/core/index/geo/geo3.js
index 7e54fd7dc5f91..381ec12f3991d 100644
--- a/jstests/core/index/geo/geo3.js
+++ b/jstests/core/index/geo/geo3.js
@@ -4,11 +4,11 @@
// ]
(function() {
-t = db.geo3;
+let t = db.geo3;
t.drop();
-n = 1;
-arr = [];
+let n = 1;
+let arr = [];
for (var x = -100; x < 100; x += 2) {
for (var y = -100; y < 100; y += 2) {
arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5});
@@ -67,9 +67,7 @@ assert.commandWorked(t.createIndex({loc: "2d", b: 1}));
testFiltering("loc and b");
-q = {
- loc: {$near: [50, 50]}
-};
+let q = {loc: {$near: [50, 50]}};
assert.eq(100, t.find(q).limit(100).itcount(), "D1");
assert.eq(100, t.find(q).limit(100).size(), "D2");
diff --git a/jstests/core/index/geo/geo6.js b/jstests/core/index/geo/geo6.js
index 8d32c066c88e2..98022ad7940ba 100644
--- a/jstests/core/index/geo/geo6.js
+++ b/jstests/core/index/geo/geo6.js
@@ -1,4 +1,4 @@
-t = db.geo6;
+let t = db.geo6;
t.drop();
t.createIndex({loc: "2d"});
diff --git a/jstests/core/index/geo/geo9.js b/jstests/core/index/geo/geo9.js
index 6b1bfb6063143..701c6bd6680f1 100644
--- a/jstests/core/index/geo/geo9.js
+++ b/jstests/core/index/geo/geo9.js
@@ -1,4 +1,4 @@
-t = db.geo9;
+let t = db.geo9;
t.drop();
t.save({_id: 1, a: [10, 10], b: [50, 50]});
@@ -15,7 +15,7 @@ t.createIndex({b: "2d"});
function check(field) {
var q = {};
q[field] = {$near: [11, 11]};
- arr = t.find(q).limit(3).map(function(z) {
+ let arr = t.find(q).limit(3).map(function(z) {
return Geo.distance([11, 11], z[field]);
});
assert.eq(2 * Math.sqrt(2), Array.sum(arr), "test " + field);
diff --git a/jstests/core/index/geo/geo_2d_explain.js b/jstests/core/index/geo/geo_2d_explain.js
index 36adad19a6c60..c115b958186ac 100644
--- a/jstests/core/index/geo/geo_2d_explain.js
+++ b/jstests/core/index/geo/geo_2d_explain.js
@@ -1,11 +1,10 @@
// @tags: [
// assumes_balancer_off,
// ]
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
var t = db.geo_2d_explain;
-load("jstests/libs/analyze_plan.js");
-
t.drop();
var n = 1000;
diff --git a/jstests/core/index/geo/geo_allowedcomparisons.js b/jstests/core/index/geo/geo_allowedcomparisons.js
index 2f689f4be6870..80ea1e25fee09 100644
--- a/jstests/core/index/geo/geo_allowedcomparisons.js
+++ b/jstests/core/index/geo/geo_allowedcomparisons.js
@@ -1,33 +1,27 @@
// A test for what geometries can interact with what other geometries.
-t = db.geo_allowedcomparisons;
+let t = db.geo_allowedcomparisons;
// Any GeoJSON object can intersect with any geojson object.
-geojsonPoint = {
- "type": "Point",
- "coordinates": [0, 0]
-};
-oldPoint = [0, 0];
+let geojsonPoint = {"type": "Point", "coordinates": [0, 0]};
+let oldPoint = [0, 0];
// GeoJSON polygons can contain any geojson object and OLD points.
-geojsonPoly = {
+let geojsonPoly = {
"type": "Polygon",
"coordinates": [[[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]]]
};
// This can be contained by GJ polygons, intersected by anything GJ and old points.
-geojsonLine = {
- "type": "LineString",
- "coordinates": [[0, 0], [1, 1]]
-};
+let geojsonLine = {"type": "LineString", "coordinates": [[0, 0], [1, 1]]};
// $centerSphere can contain old or new points.
-oldCenterSphere = [[0, 0], Math.PI / 180];
+let oldCenterSphere = [[0, 0], Math.PI / 180];
// $box can contain old points.
-oldBox = [[-5, -5], [5, 5]];
+let oldBox = [[-5, -5], [5, 5]];
// $polygon can contain old points.
-oldPolygon = [[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]];
+let oldPolygon = [[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]];
// $center can contain old points.
-oldCenter = [[0, 0], 1];
+let oldCenter = [[0, 0], 1];
t.drop();
t.createIndex({geo: "2d"});
@@ -47,10 +41,7 @@ assert.writeError(t.insert({geo: oldCenter}));
// Verify that even if we can't index them, we can use them in a matcher.
t.insert({gj: geojsonLine});
t.insert({gj: geojsonPoly});
-geojsonPoint2 = {
- "type": "Point",
- "coordinates": [0, 0.001]
-};
+let geojsonPoint2 = {"type": "Point", "coordinates": [0, 0.001]};
t.insert({gjp: geojsonPoint2});
// We convert between old and new style points.
diff --git a/jstests/core/index/geo/geo_array0.js b/jstests/core/index/geo/geo_array0.js
index 0c9dc096afb79..29798952d6074 100644
--- a/jstests/core/index/geo/geo_array0.js
+++ b/jstests/core/index/geo/geo_array0.js
@@ -5,7 +5,7 @@
// ]
// Make sure the very basics of geo arrays are sane by creating a few multi location docs
-t = db.geoarray;
+let t = db.geoarray;
function test(index) {
t.drop();
diff --git a/jstests/core/index/geo/geo_array2.js b/jstests/core/index/geo/geo_array2.js
index 7da56a576eb6e..7e09608c07ebe 100644
--- a/jstests/core/index/geo/geo_array2.js
+++ b/jstests/core/index/geo/geo_array2.js
@@ -1,6 +1,6 @@
// Check the semantics of near calls with multiple locations
-t = db.geoarray2;
+let t = db.geoarray2;
t.drop();
var numObjs = 10;
@@ -12,7 +12,7 @@ Random.setRandomSeed();
for (var i = -1; i < 2; i++) {
for (var j = -1; j < 2; j++) {
- locObj = [];
+ let locObj = [];
if (i != 0 || j != 0)
locObj.push({x: i * 50 + Random.rand(), y: j * 50 + Random.rand()});
@@ -28,7 +28,7 @@ assert.commandWorked(t.createIndex({loc: "2d", type: 1}));
print("Starting testing phase... ");
-for (var t = 0; t < 2; t++) {
+for (let t = 0; t < 2; t++) {
var type = t == 0 ? "A" : "B";
for (var i = -1; i < 2; i++) {
diff --git a/jstests/core/index/geo/geo_big_polygon2.js b/jstests/core/index/geo/geo_big_polygon2.js
index 389bc9ef7bcf3..a4dc09a132e88 100644
--- a/jstests/core/index/geo/geo_big_polygon2.js
+++ b/jstests/core/index/geo/geo_big_polygon2.js
@@ -461,9 +461,9 @@ function nGonGenerator(N, D, clockwise, LON, LAT) {
lat = (-D / 2);
}
lon = Math.sqrt((D / 2) * (D / 2) - (lat * lat));
- newlat = lat + LAT;
- newlon = lon + LON;
- conjugateLon = LON - lon;
+ let newlat = lat + LAT;
+ let newlon = lon + LON;
+ let conjugateLon = LON - lon;
pts[i] = [newlon, newlat];
pts[N - i] = [conjugateLon, newlat];
}
diff --git a/jstests/core/index/geo/geo_borders.js b/jstests/core/index/geo/geo_borders.js
index 897697a88cb33..09b89c0d753d5 100644
--- a/jstests/core/index/geo/geo_borders.js
+++ b/jstests/core/index/geo/geo_borders.js
@@ -4,16 +4,16 @@
* ]
*/
-t = db.borders;
+let t = db.borders;
t.drop();
-epsilon = 0.0001;
+let epsilon = 0.0001;
// For these tests, *required* that step ends exactly on max
-min = -1;
-max = 1;
-step = 1;
-numItems = 0;
+let min = -1;
+let max = 1;
+let step = 1;
+let numItems = 0;
for (var x = min; x <= max; x += step) {
for (var y = min; y <= max; y += step) {
@@ -22,8 +22,8 @@ for (var x = min; x <= max; x += step) {
}
}
-overallMin = -1;
-overallMax = 1;
+let overallMin = -1;
+let overallMax = 1;
// Create a point index slightly smaller than the points we have
var res =
@@ -113,14 +113,14 @@ assert.eq(numItems, t.find({
// Circle tests
// **************
-center = (overallMax + overallMin) / 2;
+let center = (overallMax + overallMin) / 2;
center = [center, center];
-radius = overallMax;
+let radius = overallMax;
-offCenter = [center[0] + radius, center[1] + radius];
-onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon];
-offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon];
-onBoundsNeg = [-onBounds[0], -onBounds[1]];
+let offCenter = [center[0] + radius, center[1] + radius];
+let onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon];
+let offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon];
+let onBoundsNeg = [-onBounds[0], -onBounds[1]];
// Make sure we can get all points when radius is exactly at full bounds
assert.lt(0, t.find({loc: {$within: {$center: [center, radius + epsilon]}}}).count());
@@ -133,7 +133,7 @@ assert.lt(0, t.find({loc: {$within: {$center: [offCenter, radius + 2 * epsilon]}
// Make sure we get correct corner point when center is in bounds
// (x bounds wrap, so could get other corner)
-cornerPt = t.findOne({loc: {$within: {$center: [offCenter, step / 2]}}});
+let cornerPt = t.findOne({loc: {$within: {$center: [offCenter, step / 2]}}});
assert.eq(cornerPt.loc.y, overallMax);
// Make sure we get correct corner point when center is on bounds
diff --git a/jstests/core/index/geo/geo_box1.js b/jstests/core/index/geo/geo_box1.js
index d0c4ff50bdff6..40a754d64b3a8 100644
--- a/jstests/core/index/geo/geo_box1.js
+++ b/jstests/core/index/geo/geo_box1.js
@@ -2,32 +2,32 @@
// requires_getmore,
// ]
-t = db.geo_box1;
+let t = db.geo_box1;
t.drop();
-num = 0;
-for (x = 0; x <= 20; x++) {
- for (y = 0; y <= 20; y++) {
- o = {_id: num++, loc: [x, y]};
+let num = 0;
+for (let x = 0; x <= 20; x++) {
+ for (let y = 0; y <= 20; y++) {
+ let o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
t.createIndex({loc: "2d"});
-searches = [
+let searches = [
[[1, 2], [4, 5]],
[[1, 1], [2, 2]],
[[0, 2], [4, 5]],
[[1, 1], [2, 8]],
];
-for (i = 0; i < searches.length; i++) {
- b = searches[i];
+for (let i = 0; i < searches.length; i++) {
+ let b = searches[i];
// printjson( b );
- q = {loc: {$within: {$box: b}}};
- numWanetd = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
+ let q = {loc: {$within: {$box: b}}};
+ let numWanetd = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
assert.eq(numWanetd, t.find(q).itcount(), "itcount: " + tojson(q));
printjson(t.find(q).explain());
}
diff --git a/jstests/core/index/geo/geo_box1_noindex.js b/jstests/core/index/geo/geo_box1_noindex.js
index 879f17e300288..3da72a96b4478 100644
--- a/jstests/core/index/geo/geo_box1_noindex.js
+++ b/jstests/core/index/geo/geo_box1_noindex.js
@@ -3,28 +3,28 @@
// ]
// SERVER-7343: allow $within without a geo index.
-t = db.geo_box1_noindex;
+let t = db.geo_box1_noindex;
t.drop();
-num = 0;
-for (x = 0; x <= 20; x++) {
- for (y = 0; y <= 20; y++) {
- o = {_id: num++, loc: [x, y]};
+let num = 0;
+for (let x = 0; x <= 20; x++) {
+ for (let y = 0; y <= 20; y++) {
+ let o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
-searches = [
+let searches = [
[[1, 2], [4, 5]],
[[1, 1], [2, 2]],
[[0, 2], [4, 5]],
[[1, 1], [2, 8]],
];
-for (i = 0; i < searches.length; i++) {
- b = searches[i];
- q = {loc: {$within: {$box: b}}};
- numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
+for (let i = 0; i < searches.length; i++) {
+ let b = searches[i];
+ let q = {loc: {$within: {$box: b}}};
+ let numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
assert.eq(numWanted, t.find(q).itcount(), "itcount: " + tojson(q));
printjson(t.find(q).explain());
}
diff --git a/jstests/core/index/geo/geo_box2.js b/jstests/core/index/geo/geo_box2.js
index 7120cfd3e4a16..60c76603b5b3a 100644
--- a/jstests/core/index/geo/geo_box2.js
+++ b/jstests/core/index/geo/geo_box2.js
@@ -1,9 +1,9 @@
-t = db.geo_box2;
+let t = db.geo_box2;
t.drop();
-for (i = 1; i < 10; i++) {
- for (j = 1; j < 10; j++) {
+for (let i = 1; i < 10; i++) {
+ for (let j = 1; j < 10; j++) {
t.insert({loc: [i, j]});
}
}
diff --git a/jstests/core/index/geo/geo_box3.js b/jstests/core/index/geo/geo_box3.js
index 4a91ffb0d1d19..3ca3d4b82e35d 100644
--- a/jstests/core/index/geo/geo_box3.js
+++ b/jstests/core/index/geo/geo_box3.js
@@ -4,7 +4,7 @@
// bounding box.
// This is the bug reported in SERVER-994.
-t = db.geo_box3;
+let t = db.geo_box3;
t.drop();
t.insert({point: {x: -15000000, y: 10000000}});
t.createIndex({point: "2d"}, {min: -21000000, max: 21000000});
diff --git a/jstests/core/index/geo/geo_center_sphere1.js b/jstests/core/index/geo/geo_center_sphere1.js
index 2b37c7c3f96ff..c3cdb5aa7d076 100644
--- a/jstests/core/index/geo/geo_center_sphere1.js
+++ b/jstests/core/index/geo/geo_center_sphere1.js
@@ -3,13 +3,14 @@
// requires_fastcount,
// ]
-t = db.geo_center_sphere1;
+let t = db.geo_center_sphere1;
function test(index) {
t.drop();
- skip = 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4)
+ let skip =
+ 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4)
- searches = [
+ let searches = [
// x , y rad
[[5, 0], 0.05], // ~200 miles
[[135, 0], 0.05],
@@ -23,18 +24,18 @@ function test(index) {
[[-20, 60], 0.25],
[[-20, -70], 0.25],
];
- correct = searches.map(function(z) {
+ let correct = searches.map(function(z) {
return [];
});
- num = 0;
+ let num = 0;
var bulk = t.initializeUnorderedBulkOp();
- for (x = -179; x <= 179; x += skip) {
- for (y = -89; y <= 89; y += skip) {
- o = {_id: num++, loc: [x, y]};
+ for (let x = -179; x <= 179; x += skip) {
+ for (let y = -89; y <= 89; y += skip) {
+ let o = {_id: num++, loc: [x, y]};
bulk.insert(o);
- for (i = 0; i < searches.length; i++) {
+ for (let i = 0; i < searches.length; i++) {
if (Geo.sphereDistance([x, y], searches[i][0]) <= searches[i][1])
correct[i].push(o);
}
@@ -47,10 +48,10 @@ function test(index) {
t.createIndex({loc: index});
}
- for (i = 0; i < searches.length; i++) {
+ for (let i = 0; i < searches.length; i++) {
print('------------');
print(tojson(searches[i]) + "\t" + correct[i].length);
- q = {loc: {$within: {$centerSphere: searches[i]}}};
+ let q = {loc: {$within: {$centerSphere: searches[i]}}};
// correct[i].forEach( printjson )
// printjson( q );
@@ -69,8 +70,8 @@ function test(index) {
return z._id;
});
- missing = [];
- epsilon = 0.001; // allow tenth of a percent error due to conversions
+ let missing = [];
+ let epsilon = 0.001; // allow tenth of a percent error due to conversions
for (var j = 0; j < x.length; j++) {
if (!Array.contains(y, x[j])) {
missing.push(x[j]);
diff --git a/jstests/core/index/geo/geo_center_sphere2.js b/jstests/core/index/geo/geo_center_sphere2.js
index 1c59850d84182..ecf3622217c9e 100644
--- a/jstests/core/index/geo/geo_center_sphere2.js
+++ b/jstests/core/index/geo/geo_center_sphere2.js
@@ -18,8 +18,8 @@ function computexscandist(y, maxDistDegrees) {
}
function pointIsOK(startPoint, radius) {
- yscandist = rad2deg(radius) + 0.01;
- xscandist = computexscandist(startPoint[1], yscandist);
+ let yscandist = rad2deg(radius) + 0.01;
+ let xscandist = computexscandist(startPoint[1], yscandist);
return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180) &&
(startPoint[1] + yscandist < 90) && (startPoint[1] - yscandist > -90);
}
diff --git a/jstests/core/index/geo/geo_circle2.js b/jstests/core/index/geo/geo_circle2.js
index d7947f96502eb..1d228db988ca8 100644
--- a/jstests/core/index/geo/geo_circle2.js
+++ b/jstests/core/index/geo/geo_circle2.js
@@ -1,4 +1,4 @@
-t = db.geo_circle2;
+let t = db.geo_circle2;
t.drop();
t.createIndex({loc: "2d", categories: 1}, {"name": "placesIdx", "min": -100, "max": 100});
diff --git a/jstests/core/index/geo/geo_circle3.js b/jstests/core/index/geo/geo_circle3.js
index da7a9af6b34fe..9466f1e0c5f96 100644
--- a/jstests/core/index/geo/geo_circle3.js
+++ b/jstests/core/index/geo/geo_circle3.js
@@ -1,7 +1,7 @@
// SERVER-848 and SERVER-1191.
db.places.drop();
-n = 0;
+let n = 0;
db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 52}});
db.places.save({"_id": n++, "loc": {"x": 5, "y": 52}});
db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52}});
@@ -12,8 +12,8 @@ db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52.0001}});
db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 51.9999}});
db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 51.9999}});
db.places.createIndex({loc: "2d"});
-radius = 0.0001;
-center = [5, 52];
+let radius = 0.0001;
+let center = [5, 52];
// print(db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).count())
// FIXME: we want an assert, e.g., that there be 5 answers in the find().
db.places.find({"loc": {"$within": {"$center": [center, radius]}}}).forEach(printjson);
diff --git a/jstests/core/index/geo/geo_circle4.js b/jstests/core/index/geo/geo_circle4.js
index c2194142795e0..50cff15512ffe 100644
--- a/jstests/core/index/geo/geo_circle4.js
+++ b/jstests/core/index/geo/geo_circle4.js
@@ -2,8 +2,8 @@
function test(index) {
db.server848.drop();
- radius = 0.0001;
- center = [5, 52];
+ let radius = 0.0001;
+ let center = [5, 52];
db.server848.save({"_id": 1, "loc": {"x": 4.9999, "y": 52}});
db.server848.save({"_id": 2, "loc": {"x": 5, "y": 52}});
@@ -17,12 +17,12 @@ function test(index) {
if (index) {
db.server848.createIndex({loc: "2d"});
}
- r = db.server848.find({"loc": {"$within": {"$center": [center, radius]}}}, {_id: 1});
+ let r = db.server848.find({"loc": {"$within": {"$center": [center, radius]}}}, {_id: 1});
assert.eq(5, r.count(), "A1");
// FIXME: surely code like this belongs in utils.js.
- a = r.toArray();
- x = [];
- for (k in a) {
+ let a = r.toArray();
+ let x = [];
+ for (let k in a) {
x.push(a[k]["_id"]);
}
x.sort();
diff --git a/jstests/core/index/geo/geo_circle5.js b/jstests/core/index/geo/geo_circle5.js
index 27b973a8edeb1..4f43d8b3becbb 100644
--- a/jstests/core/index/geo/geo_circle5.js
+++ b/jstests/core/index/geo/geo_circle5.js
@@ -12,9 +12,9 @@ db.server1238.createIndex({loc: "2d"}, {min: -21000000, max: 21000000});
db.server1238.save({loc: [5000000, 900000], id: 3});
db.server1238.save({loc: [5000000, 900000], id: 4});
-c1 = db.server1238.find({"loc": {"$within": {"$center": [[5000000, 900000], 1.0]}}}).count();
+let c1 = db.server1238.find({"loc": {"$within": {"$center": [[5000000, 900000], 1.0]}}}).count();
-c2 = db.server1238.find({"loc": {"$within": {"$center": [[5000001, 900000], 5.0]}}}).count();
+let c2 = db.server1238.find({"loc": {"$within": {"$center": [[5000001, 900000], 5.0]}}}).count();
assert.eq(4, c1, "A1");
assert.eq(c1, c2, "B1");
diff --git a/jstests/core/index/geo/geo_exactfetch.js b/jstests/core/index/geo/geo_exactfetch.js
index 43ef46fb55845..6ced564107f8c 100644
--- a/jstests/core/index/geo/geo_exactfetch.js
+++ b/jstests/core/index/geo/geo_exactfetch.js
@@ -1,5 +1,5 @@
// SERVER-7322
-t = db.geo_exactfetch;
+let t = db.geo_exactfetch;
t.drop();
function test(indexname) {
diff --git a/jstests/core/index/geo/geo_fiddly_box.js b/jstests/core/index/geo/geo_fiddly_box.js
index 9f5a9e8d6c41f..84e58aa60e32a 100644
--- a/jstests/core/index/geo/geo_fiddly_box.js
+++ b/jstests/core/index/geo/geo_fiddly_box.js
@@ -8,7 +8,7 @@
// "expand" portion of the geo-lookup expands the 2d range in only one
// direction (so points are required on either side of the expanding range)
-t = db.geo_fiddly_box;
+let t = db.geo_fiddly_box;
t.drop();
t.createIndex({loc: "2d"});
@@ -28,11 +28,11 @@ assert.eq(
// Test normal lookup of a small square of points as a sanity check.
-epsilon = 0.0001;
-min = -1;
-max = 1;
-step = 1;
-numItems = 0;
+let epsilon = 0.0001;
+let min = -1;
+let max = 1;
+let step = 1;
+let numItems = 0;
t.drop();
t.createIndex({loc: "2d"}, {max: max + epsilon / 2, min: min - epsilon / 2});
diff --git a/jstests/core/index/geo/geo_fiddly_box2.js b/jstests/core/index/geo/geo_fiddly_box2.js
index a0f87203163ec..c71c4778603bf 100644
--- a/jstests/core/index/geo/geo_fiddly_box2.js
+++ b/jstests/core/index/geo/geo_fiddly_box2.js
@@ -4,7 +4,7 @@
// required to do
// exact lookups on the points to get correct results.
-t = db.geo_fiddly_box2;
+let t = db.geo_fiddly_box2;
t.drop();
t.insert({"letter": "S", "position": [-3, 0]});
@@ -17,7 +17,7 @@ t.insert({"letter": "L", "position": [3, 0]});
t.insert({"letter": "E", "position": [4, 0]});
t.createIndex({position: "2d"});
-result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}});
+let result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}});
assert.eq(4, result.count());
t.dropIndex({position: "2d"});
diff --git a/jstests/core/index/geo/geo_invalid_polygon.js b/jstests/core/index/geo/geo_invalid_polygon.js
index 0eab7ca5406cc..7736f26073185 100644
--- a/jstests/core/index/geo/geo_invalid_polygon.js
+++ b/jstests/core/index/geo/geo_invalid_polygon.js
@@ -1,6 +1,6 @@
// With invalid geometry, error message should include _id
// SERVER-8992
-t = db.geo_invalid_polygon;
+let t = db.geo_invalid_polygon;
t.drop();
// Self-intersecting polygon, triggers
diff --git a/jstests/core/index/geo/geo_max.js b/jstests/core/index/geo/geo_max.js
index 3ef6e39dcaa06..8db0cbb5858c5 100644
--- a/jstests/core/index/geo/geo_max.js
+++ b/jstests/core/index/geo/geo_max.js
@@ -16,10 +16,10 @@ test.t.insert({loc: [-180, 0]});
test.t.insert({loc: [179.999, 0]});
test.t.insert({loc: [-179.999, 0]});
-assertXIsNegative = function(obj) {
+let assertXIsNegative = function(obj) {
assert.lt(obj.loc[0], 0);
};
-assertXIsPositive = function(obj) {
+let assertXIsPositive = function(obj) {
assert.gt(obj.loc[0], 0);
};
diff --git a/jstests/core/index/geo/geo_multikey0.js b/jstests/core/index/geo/geo_multikey0.js
index 27075a779ee90..8296dcd6854c6 100644
--- a/jstests/core/index/geo/geo_multikey0.js
+++ b/jstests/core/index/geo/geo_multikey0.js
@@ -1,6 +1,6 @@
// Multikey geo values tests - SERVER-3793.
-t = db.jstests_geo_multikey0;
+let t = db.jstests_geo_multikey0;
t.drop();
// Check that conflicting constraints are satisfied by parallel array elements.
diff --git a/jstests/core/index/geo/geo_multikey1.js b/jstests/core/index/geo/geo_multikey1.js
index e6d0ec086da91..3bdbea422726a 100644
--- a/jstests/core/index/geo/geo_multikey1.js
+++ b/jstests/core/index/geo/geo_multikey1.js
@@ -1,11 +1,11 @@
// Multikey geo index tests with parallel arrays.
-t = db.jstests_geo_multikey1;
+let t = db.jstests_geo_multikey1;
t.drop();
-locArr = [];
-arr = [];
-for (i = 0; i < 10; ++i) {
+let locArr = [];
+let arr = [];
+for (let i = 0; i < 10; ++i) {
locArr.push([i, i + 1]);
arr.push(i);
}
diff --git a/jstests/core/index/geo/geo_multinest0.js b/jstests/core/index/geo/geo_multinest0.js
index 746a530e19b0d..11111f17a5515 100644
--- a/jstests/core/index/geo/geo_multinest0.js
+++ b/jstests/core/index/geo/geo_multinest0.js
@@ -6,7 +6,7 @@
// Make sure nesting of location arrays also works.
-t = db.geonest;
+let t = db.geonest;
t.drop();
t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [50, 50], type: "work"}]});
diff --git a/jstests/core/index/geo/geo_multinest1.js b/jstests/core/index/geo/geo_multinest1.js
index b6aa53cae168c..4cc829e79ba6c 100644
--- a/jstests/core/index/geo/geo_multinest1.js
+++ b/jstests/core/index/geo/geo_multinest1.js
@@ -6,7 +6,7 @@
// Test distance queries with interleaved distances
-t = db.multinest;
+let t = db.multinest;
t.drop();
t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [29, 29], type: "work"}]});
diff --git a/jstests/core/index/geo/geo_near_random1.js b/jstests/core/index/geo/geo_near_random1.js
index 1573e72c1f816..b1f5457bdef52 100644
--- a/jstests/core/index/geo/geo_near_random1.js
+++ b/jstests/core/index/geo/geo_near_random1.js
@@ -18,9 +18,7 @@ test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
-opts = {
- sphere: 1
-};
+let opts = {sphere: 1};
// Test $nearSphere with a 2d index
test.testPt([0, 0], opts);
diff --git a/jstests/core/index/geo/geo_near_random2.js b/jstests/core/index/geo/geo_near_random2.js
index a9e242edbcca2..257b27f4f314d 100644
--- a/jstests/core/index/geo/geo_near_random2.js
+++ b/jstests/core/index/geo/geo_near_random2.js
@@ -18,10 +18,7 @@ test.insertPts(5000);
// distances are in increasing order. The test runs in O(N^2).
// Test $near with 2d index
-opts = {
- sphere: 0,
- nToTest: test.nPts * 0.01
-};
+let opts = {sphere: 0, nToTest: test.nPts * 0.01};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/core/index/geo/geo_or.js b/jstests/core/index/geo/geo_or.js
index cee91d4929dfc..a8fe7ae7b5e1c 100644
--- a/jstests/core/index/geo/geo_or.js
+++ b/jstests/core/index/geo/geo_or.js
@@ -1,6 +1,6 @@
// multiple geo clauses with $or
-t = db.geoor;
+let t = db.geoor;
t.drop();
diff --git a/jstests/core/index/geo/geo_poly_line.js b/jstests/core/index/geo/geo_poly_line.js
index 85dc927415827..58a1ae215f899 100644
--- a/jstests/core/index/geo/geo_poly_line.js
+++ b/jstests/core/index/geo/geo_poly_line.js
@@ -1,6 +1,6 @@
// Test that weird polygons work SERVER-3725
-t = db.geo_polygon5;
+let t = db.geo_polygon5;
t.drop();
t.insert({loc: [0, 0]});
diff --git a/jstests/core/index/geo/geo_polygon1_noindex.js b/jstests/core/index/geo/geo_polygon1_noindex.js
index 5f43f736b45b4..8b441c81dc409 100644
--- a/jstests/core/index/geo/geo_polygon1_noindex.js
+++ b/jstests/core/index/geo/geo_polygon1_noindex.js
@@ -1,22 +1,22 @@
// SERVER-7343: allow $within without a geo index.
-t = db.geo_polygon1_noindex;
+let t = db.geo_polygon1_noindex;
t.drop();
-num = 0;
-for (x = 1; x < 9; x++) {
- for (y = 1; y < 9; y++) {
- o = {_id: num++, loc: [x, y]};
+let num = 0;
+for (let x = 1; x < 9; x++) {
+ for (let y = 1; y < 9; y++) {
+ let o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
-triangle = [[0, 0], [1, 1], [0, 2]];
+let triangle = [[0, 0], [1, 1], [0, 2]];
// Look at only a small slice of the data within a triangle
assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).count(), "Triangle Test");
-boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
+let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test");
@@ -33,7 +33,7 @@ assert.eq(
t.drop();
-pacman = [
+let pacman = [
[0, 2],
[0, 4],
[2, 6],
diff --git a/jstests/core/index/geo/geo_polygon2.js b/jstests/core/index/geo/geo_polygon2.js
index 7a6ad0aedb53e..0452b28f5631b 100644
--- a/jstests/core/index/geo/geo_polygon2.js
+++ b/jstests/core/index/geo/geo_polygon2.js
@@ -29,8 +29,8 @@ for (var test = 0; test < numTests; test++) {
x = x[0];
}
- xp = x * Math.cos(rotation) - y * Math.sin(rotation);
- yp = y * Math.cos(rotation) + x * Math.sin(rotation);
+ let xp = x * Math.cos(rotation) - y * Math.sin(rotation);
+ let yp = y * Math.cos(rotation) + x * Math.sin(rotation);
var scaleX = (bounds[1] - bounds[0]) / 360;
var scaleY = (bounds[1] - bounds[0]) / 360;
@@ -121,12 +121,13 @@ for (var test = 0; test < numTests; test++) {
}
}
- turtlePaths = [];
+ let turtlePaths = [];
for (var t = 0; t < numTurtles; t++) {
- turtlePath = [];
+ let turtlePath = [];
var nextSeg = function(currTurtle, prevTurtle) {
var pathX = currTurtle[0];
+ let pathY;
if (currTurtle[1] < prevTurtle[1]) {
pathX = currTurtle[0] + 1;
@@ -150,15 +151,15 @@ for (var test = 0; test < numTests; test++) {
};
for (var s = 1; s < turtles[t].length; s++) {
- currTurtle = turtles[t][s];
- prevTurtle = turtles[t][s - 1];
+ let currTurtle = turtles[t][s];
+ let prevTurtle = turtles[t][s - 1];
turtlePath.push(nextSeg(currTurtle, prevTurtle));
}
for (var s = turtles[t].length - 2; s >= 0; s--) {
- currTurtle = turtles[t][s];
- prevTurtle = turtles[t][s + 1];
+ let currTurtle = turtles[t][s];
+ let prevTurtle = turtles[t][s + 1];
turtlePath.push(nextSeg(currTurtle, prevTurtle));
}
@@ -169,7 +170,7 @@ for (var test = 0; test < numTests; test++) {
var lastTurtle = turtles[t][turtles[t].length - 1];
grid[lastTurtle[0]][lastTurtle[1]] = undefined;
- fixedTurtlePath = [];
+ let fixedTurtlePath = [];
for (var s = 1; s < turtlePath.length; s++) {
if (turtlePath[s - 1][0] == turtlePath[s][0] &&
turtlePath[s - 1][1] == turtlePath[s][1]) {
@@ -236,7 +237,7 @@ for (var test = 0; test < numTests; test++) {
t.insert({loc: allPointsIn});
t.insert({loc: allPointsOut});
- allPoints = allPointsIn.concat(allPointsOut);
+ let allPoints = allPointsIn.concat(allPointsOut);
t.insert({loc: allPoints});
print("Points : ");
diff --git a/jstests/core/index/geo/geo_queryoptimizer.js b/jstests/core/index/geo/geo_queryoptimizer.js
index 199cedf5330f9..a9c3378925f0a 100644
--- a/jstests/core/index/geo/geo_queryoptimizer.js
+++ b/jstests/core/index/geo/geo_queryoptimizer.js
@@ -1,4 +1,4 @@
-t = db.geo_qo1;
+let t = db.geo_qo1;
t.drop();
t.createIndex({loc: "2d"});
diff --git a/jstests/core/index/geo/geo_regex0.js b/jstests/core/index/geo/geo_regex0.js
index 7629c193ea2a9..1a7ef2ffde68e 100644
--- a/jstests/core/index/geo/geo_regex0.js
+++ b/jstests/core/index/geo/geo_regex0.js
@@ -1,22 +1,15 @@
// From SERVER-2247
// Tests to make sure regex works with geo indices
-t = db.regex0;
+let t = db.regex0;
t.drop();
t.createIndex({point: '2d', words: 1});
t.insert({point: [1, 1], words: ['foo', 'bar']});
-regex = {
- words: /^f/
-};
-geo = {
- point: {$near: [1, 1]}
-};
-both = {
- point: {$near: [1, 1]},
- words: /^f/
-};
+let regex = {words: /^f/};
+let geo = {point: {$near: [1, 1]}};
+let both = {point: {$near: [1, 1]}, words: /^f/};
assert.eq(1, t.find(regex).count());
assert.eq(1, t.find(geo).count());
diff --git a/jstests/core/index/geo/geo_s2dedupnear.js b/jstests/core/index/geo/geo_s2dedupnear.js
index ad1674f1e6e93..89a1c705c7194 100644
--- a/jstests/core/index/geo/geo_s2dedupnear.js
+++ b/jstests/core/index/geo/geo_s2dedupnear.js
@@ -1,6 +1,6 @@
// Make sure that we don't return several of the same result due to faulty
// assumptions about the btree cursor. That is, don't return duplicate results.
-t = db.geo_s2dedupnear;
+let t = db.geo_s2dedupnear;
t.drop();
t.createIndex({geo: "2dsphere"});
@@ -9,5 +9,5 @@ var x = {
"coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]]
};
t.insert({geo: x});
-res = t.find({geo: {$geoNear: {"type": "Point", "coordinates": [31, 41]}}});
+let res = t.find({geo: {$geoNear: {"type": "Point", "coordinates": [31, 41]}}});
assert.eq(res.itcount(), 1);
diff --git a/jstests/core/index/geo/geo_s2dupe_points.js b/jstests/core/index/geo/geo_s2dupe_points.js
index 9f94f69caaa0d..54f9b3b3fc80f 100644
--- a/jstests/core/index/geo/geo_s2dupe_points.js
+++ b/jstests/core/index/geo/geo_s2dupe_points.js
@@ -2,7 +2,7 @@
// s2 rejects shapes with duplicate adjacent points as invalid, but they are
// valid in GeoJSON. We store the duplicates, but internally remove them
// before indexing or querying.
-t = db.geo_s2dupe_points;
+let t = db.geo_s2dupe_points;
t.drop();
t.createIndex({geo: "2dsphere"});
diff --git a/jstests/core/index/geo/geo_s2edgecases.js b/jstests/core/index/geo/geo_s2edgecases.js
index 6e4633f58c8c3..b083ea88ae092 100644
--- a/jstests/core/index/geo/geo_s2edgecases.js
+++ b/jstests/core/index/geo/geo_s2edgecases.js
@@ -1,51 +1,42 @@
-t = db.geo_s2edgecases;
+let t = db.geo_s2edgecases;
t.drop();
-roundworldpoint = {
- "type": "Point",
- "coordinates": [180, 0]
-};
+let roundworldpoint = {"type": "Point", "coordinates": [180, 0]};
// Opposite the equator
-roundworld = {
+let roundworld = {
"type": "Polygon",
"coordinates": [[[179, 1], [-179, 1], [-179, -1], [179, -1], [179, 1]]]
};
t.insert({geo: roundworld});
-roundworld2 = {
+let roundworld2 = {
"type": "Polygon",
"coordinates": [[[179, 1], [179, -1], [-179, -1], [-179, 1], [179, 1]]]
};
t.insert({geo: roundworld2});
// North pole
-santapoint = {
- "type": "Point",
- "coordinates": [180, 90]
-};
-santa = {
+let santapoint = {"type": "Point", "coordinates": [180, 90]};
+let santa = {
"type": "Polygon",
"coordinates": [[[179, 89], [179, 90], [-179, 90], [-179, 89], [179, 89]]]
};
t.insert({geo: santa});
-santa2 = {
+let santa2 = {
"type": "Polygon",
"coordinates": [[[179, 89], [-179, 89], [-179, 90], [179, 90], [179, 89]]]
};
t.insert({geo: santa2});
// South pole
-penguinpoint = {
- "type": "Point",
- "coordinates": [0, -90]
-};
-penguin1 = {
+let penguinpoint = {"type": "Point", "coordinates": [0, -90]};
+let penguin1 = {
"type": "Polygon",
"coordinates": [[[0, -89], [0, -90], [179, -90], [179, -89], [0, -89]]]
};
t.insert({geo: penguin1});
-penguin2 = {
+let penguin2 = {
"type": "Polygon",
"coordinates": [[[0, -89], [179, -89], [179, -90], [0, -90], [0, -89]]]
};
@@ -53,7 +44,7 @@ t.insert({geo: penguin2});
t.createIndex({geo: "2dsphere", nonGeo: 1});
-res = t.find({"geo": {"$geoIntersects": {"$geometry": roundworldpoint}}});
+let res = t.find({"geo": {"$geoIntersects": {"$geometry": roundworldpoint}}});
assert.eq(res.count(), 2);
res = t.find({"geo": {"$geoIntersects": {"$geometry": santapoint}}});
assert.eq(res.count(), 2);
diff --git a/jstests/core/index/geo/geo_s2exact.js b/jstests/core/index/geo/geo_s2exact.js
index 92ce551d87320..eb563498c07d4 100644
--- a/jstests/core/index/geo/geo_s2exact.js
+++ b/jstests/core/index/geo/geo_s2exact.js
@@ -1,5 +1,5 @@
// Queries on exact geometry should return the exact geometry.
-t = db.geo_s2exact;
+let t = db.geo_s2exact;
t.drop();
function test(geometry) {
@@ -10,20 +10,11 @@ function test(geometry) {
t.dropIndex({geo: "2dsphere"});
}
-pointA = {
- "type": "Point",
- "coordinates": [40, 5]
-};
+let pointA = {"type": "Point", "coordinates": [40, 5]};
test(pointA);
-someline = {
- "type": "LineString",
- "coordinates": [[40, 5], [41, 6]]
-};
+let someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]};
test(someline);
-somepoly = {
- "type": "Polygon",
- "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
-};
+let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]};
test(somepoly);
diff --git a/jstests/core/index/geo/geo_s2index.js b/jstests/core/index/geo/geo_s2index.js
index af4475a79cbaf..9ffdc2c02a0fe 100644
--- a/jstests/core/index/geo/geo_s2index.js
+++ b/jstests/core/index/geo/geo_s2index.js
@@ -1,56 +1,36 @@
-t = db.geo_s2index;
+let t = db.geo_s2index;
t.drop();
// We internally drop adjacent duplicate points in lines.
-someline = {
- "type": "LineString",
- "coordinates": [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]
-};
+let someline = {"type": "LineString", "coordinates": [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]};
t.insert({geo: someline, nonGeo: "someline"});
t.createIndex({geo: "2dsphere"});
-foo = t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40, 5]}}}}).next();
+let foo =
+ t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40, 5]}}}}).next();
assert.eq(foo.geo, someline);
t.dropIndex({geo: "2dsphere"});
-pointA = {
- "type": "Point",
- "coordinates": [40, 5]
-};
+let pointA = {"type": "Point", "coordinates": [40, 5]};
t.insert({geo: pointA, nonGeo: "pointA"});
-pointD = {
- "type": "Point",
- "coordinates": [41.001, 6.001]
-};
+let pointD = {"type": "Point", "coordinates": [41.001, 6.001]};
t.insert({geo: pointD, nonGeo: "pointD"});
-pointB = {
- "type": "Point",
- "coordinates": [41, 6]
-};
+let pointB = {"type": "Point", "coordinates": [41, 6]};
t.insert({geo: pointB, nonGeo: "pointB"});
-pointC = {
- "type": "Point",
- "coordinates": [41, 6]
-};
+let pointC = {"type": "Point", "coordinates": [41, 6]};
t.insert({geo: pointC});
// Add a point within the polygon but not on the border. Don't want to be on
// the path of the polyline.
-pointE = {
- "type": "Point",
- "coordinates": [40.6, 5.4]
-};
+let pointE = {"type": "Point", "coordinates": [40.6, 5.4]};
t.insert({geo: pointE});
// Make sure we can index this without error.
t.insert({nonGeo: "noGeoField!"});
-somepoly = {
- "type": "Polygon",
- "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
-};
+let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]};
t.insert({geo: somepoly, nonGeo: "somepoly"});
var res = t.createIndex({geo: "2dsphere", nonGeo: 1});
diff --git a/jstests/core/index/geo/geo_s2indexoldformat.js b/jstests/core/index/geo/geo_s2indexoldformat.js
index ffe472464d4af..ad1008115dddb 100644
--- a/jstests/core/index/geo/geo_s2indexoldformat.js
+++ b/jstests/core/index/geo/geo_s2indexoldformat.js
@@ -1,6 +1,6 @@
// Make sure that the 2dsphere index can deal with non-GeoJSON points.
// 2dsphere does not accept legacy shapes, only legacy points.
-t = db.geo_s2indexoldformat;
+let t = db.geo_s2indexoldformat;
t.drop();
t.insert({geo: [40, 5], nonGeo: ["pointA"]});
@@ -11,7 +11,7 @@ t.insert({geo: {x: 40.6, y: 5.4}});
t.createIndex({geo: "2dsphere", nonGeo: 1});
-res = t.find({"geo": {"$geoIntersects": {"$geometry": {x: 40, y: 5}}}});
+let res = t.find({"geo": {"$geoIntersects": {"$geometry": {x: 40, y: 5}}}});
assert.eq(res.count(), 1);
res = t.find({"geo": {"$geoIntersects": {"$geometry": [41, 6]}}});
diff --git a/jstests/core/index/geo/geo_s2largewithin.js b/jstests/core/index/geo/geo_s2largewithin.js
index e1eed8a2e6a98..d94ebc5f0a774 100644
--- a/jstests/core/index/geo/geo_s2largewithin.js
+++ b/jstests/core/index/geo/geo_s2largewithin.js
@@ -1,20 +1,17 @@
// If our $within is enormous, create a coarse covering for the search so it
// doesn't take forever.
-t = db.geo_s2largewithin;
+let t = db.geo_s2largewithin;
t.drop();
t.createIndex({geo: "2dsphere"});
-testPoint = {
- name: "origin",
- geo: {type: "Point", coordinates: [0.0, 0.0]}
-};
+let testPoint = {name: "origin", geo: {type: "Point", coordinates: [0.0, 0.0]}};
-testHorizLine = {
+let testHorizLine = {
name: "horiz",
geo: {type: "LineString", coordinates: [[-2.0, 10.0], [2.0, 10.0]]}
};
-testVertLine = {
+let testVertLine = {
name: "vert",
geo: {type: "LineString", coordinates: [[10.0, -2.0], [10.0, 2.0]]}
};
@@ -25,12 +22,12 @@ t.insert(testVertLine);
// Test a poly that runs horizontally along the equator.
-longPoly = {
+let longPoly = {
type: "Polygon",
coordinates: [[[30.0, 1.0], [-30.0, 1.0], [-30.0, -1.0], [30.0, -1.0], [30.0, 1.0]]]
};
-result = t.find({geo: {$geoWithin: {$geometry: longPoly}}});
+let result = t.find({geo: {$geoWithin: {$geometry: longPoly}}});
assert.eq(result.itcount(), 1);
result = t.find({geo: {$geoWithin: {$geometry: longPoly}}});
assert.eq("origin", result[0].name);
diff --git a/jstests/core/index/geo/geo_s2meridian.js b/jstests/core/index/geo/geo_s2meridian.js
index 99eb0c63dc439..a0196bb5a41aa 100644
--- a/jstests/core/index/geo/geo_s2meridian.js
+++ b/jstests/core/index/geo/geo_s2meridian.js
@@ -1,4 +1,4 @@
-t = db.geo_s2meridian;
+let t = db.geo_s2meridian;
t.drop();
t.createIndex({geo: "2dsphere"});
@@ -8,18 +8,15 @@ t.createIndex({geo: "2dsphere"});
* that runs along the meridian.
*/
-meridianCrossingLine = {
+let meridianCrossingLine = {
geo: {type: "LineString", coordinates: [[-178.0, 10.0], [178.0, 10.0]]}
};
assert.commandWorked(t.insert(meridianCrossingLine));
-lineAlongMeridian = {
- type: "LineString",
- coordinates: [[180.0, 11.0], [180.0, 9.0]]
-};
+let lineAlongMeridian = {type: "LineString", coordinates: [[180.0, 11.0], [180.0, 9.0]]};
-result = t.find({geo: {$geoIntersects: {$geometry: lineAlongMeridian}}});
+let result = t.find({geo: {$geoIntersects: {$geometry: lineAlongMeridian}}});
assert.eq(result.itcount(), 1);
t.drop();
@@ -29,21 +26,15 @@ t.createIndex({geo: "2dsphere"});
* on the meridian, and immediately on either side, and confirm that a poly
* covering all of them returns them all.
*/
-pointOnNegativeSideOfMeridian = {
- geo: {type: "Point", coordinates: [-179.0, 1.0]}
-};
-pointOnMeridian = {
- geo: {type: "Point", coordinates: [180.0, 1.0]}
-};
-pointOnPositiveSideOfMeridian = {
- geo: {type: "Point", coordinates: [179.0, 1.0]}
-};
+let pointOnNegativeSideOfMeridian = {geo: {type: "Point", coordinates: [-179.0, 1.0]}};
+let pointOnMeridian = {geo: {type: "Point", coordinates: [180.0, 1.0]}};
+let pointOnPositiveSideOfMeridian = {geo: {type: "Point", coordinates: [179.0, 1.0]}};
t.insert(pointOnMeridian);
t.insert(pointOnNegativeSideOfMeridian);
t.insert(pointOnPositiveSideOfMeridian);
-meridianCrossingPoly = {
+let meridianCrossingPoly = {
type: "Polygon",
coordinates: [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]]
};
@@ -58,15 +49,9 @@ t.createIndex({geo: "2dsphere"});
* closer, but across the meridian, and confirm they both come back, and
* that the order is correct.
*/
-pointOnNegativeSideOfMerid = {
- name: "closer",
- geo: {type: "Point", coordinates: [-179.0, 0.0]}
-};
+let pointOnNegativeSideOfMerid = {name: "closer", geo: {type: "Point", coordinates: [-179.0, 0.0]}};
-pointOnPositiveSideOfMerid = {
- name: "farther",
- geo: {type: "Point", coordinates: [176.0, 0.0]}
-};
+let pointOnPositiveSideOfMerid = {name: "farther", geo: {type: "Point", coordinates: [176.0, 0.0]}};
t.insert(pointOnNegativeSideOfMerid);
t.insert(pointOnPositiveSideOfMerid);
diff --git a/jstests/core/index/geo/geo_s2multi.js b/jstests/core/index/geo/geo_s2multi.js
index 858dfd1efa1d9..3da229ad743ad 100644
--- a/jstests/core/index/geo/geo_s2multi.js
+++ b/jstests/core/index/geo/geo_s2multi.js
@@ -1,22 +1,19 @@
-t = db.geo_s2multi;
+let t = db.geo_s2multi;
t.drop();
t.createIndex({geo: "2dsphere"});
// Let's try the examples in the GeoJSON spec.
-multiPointA = {
- "type": "MultiPoint",
- "coordinates": [[100.0, 0.0], [101.0, 1.0]]
-};
+let multiPointA = {"type": "MultiPoint", "coordinates": [[100.0, 0.0], [101.0, 1.0]]};
assert.commandWorked(t.insert({geo: multiPointA}));
-multiLineStringA = {
+let multiLineStringA = {
"type": "MultiLineString",
"coordinates": [[[100.0, 0.0], [101.0, 1.0]], [[102.0, 2.0], [103.0, 3.0]]]
};
assert.commandWorked(t.insert({geo: multiLineStringA}));
-multiPolygonA = {
+let multiPolygonA = {
"type": "MultiPolygon",
"coordinates": [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
@@ -61,7 +58,7 @@ assert.eq(
// Polygon contains itself and the multipoint.
assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
-partialPolygonA = {
+let partialPolygonA = {
"type": "Polygon",
"coordinates": [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]]
};
diff --git a/jstests/core/index/geo/geo_s2near.js b/jstests/core/index/geo/geo_s2near.js
index 2cd732da67a2a..7c584992e8a77 100644
--- a/jstests/core/index/geo/geo_s2near.js
+++ b/jstests/core/index/geo/geo_s2near.js
@@ -4,17 +4,14 @@
// Test 2dsphere near search, called via find and $geoNear.
(function() {
-t = db.geo_s2near;
+let t = db.geo_s2near;
t.drop();
// Make sure that geoNear gives us back loc
-goldenPoint = {
- type: "Point",
- coordinates: [31.0, 41.0]
-};
+let goldenPoint = {type: "Point", coordinates: [31.0, 41.0]};
t.insert({geo: goldenPoint});
t.createIndex({geo: "2dsphere"});
-resNear =
+let resNear =
t.aggregate([
{$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}},
{$limit: 1}
@@ -25,31 +22,22 @@ assert.eq(resNear[0].loc, goldenPoint);
// FYI:
// One degree of long @ 0 is 111km or so.
// One degree of lat @ 0 is 110km or so.
-lat = 0;
-lng = 0;
-points = 10;
+let lat = 0;
+let lng = 0;
+let points = 10;
for (var x = -points; x < points; x += 1) {
for (var y = -points; y < points; y += 1) {
t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}});
}
}
-origin = {
- "type": "Point",
- "coordinates": [lng, lat]
-};
+let origin = {"type": "Point", "coordinates": [lng, lat]};
t.createIndex({geo: "2dsphere"});
// Near only works when the query is a point.
-someline = {
- "type": "LineString",
- "coordinates": [[40, 5], [41, 6]]
-};
-somepoly = {
- "type": "Polygon",
- "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
-};
+let someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]};
+let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]};
assert.throws(function() {
return t.find({"geo": {"$near": {"$geometry": someline}}}).count();
});
@@ -70,7 +58,7 @@ assert.commandFailedWithCode(db.runCommand({
2);
// Do some basic near searches.
-res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10);
+let res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10);
resNear = t.aggregate([
{$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}},
{$limit: 10},
@@ -109,29 +97,30 @@ assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).to
function testRadAndDegreesOK(distance) {
// Distance for old style points is radians.
- resRadians = t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}});
+ let resRadians =
+ t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}});
// Distance for new style points is meters.
- resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}});
+ let resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}});
// And we should get the same # of results no matter what.
assert.eq(resRadians.itcount(), resMeters.itcount());
// Also, $geoNear should behave the same way.
- resGNMeters = t.aggregate({
- $geoNear: {
- near: origin,
- distanceField: "dis",
- maxDistance: distance,
- spherical: true,
- }
- }).toArray();
- resGNRadians = t.aggregate({
- $geoNear: {
- near: [0, 0],
- distanceField: "dis",
- maxDistance: (distance / (6378.1 * 1000)),
- spherical: true,
- }
- }).toArray();
+ let resGNMeters = t.aggregate({
+ $geoNear: {
+ near: origin,
+ distanceField: "dis",
+ maxDistance: distance,
+ spherical: true,
+ }
+ }).toArray();
+ let resGNRadians = t.aggregate({
+ $geoNear: {
+ near: [0, 0],
+ distanceField: "dis",
+ maxDistance: (distance / (6378.1 * 1000)),
+ spherical: true,
+ }
+ }).toArray();
const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` +
`$geoNear using radian distances returned ${tojson(resGNRadians)}`;
assert.eq(resGNRadians.length, resGNMeters.length, errmsg);
diff --git a/jstests/core/index/geo/geo_s2nearComplex.js b/jstests/core/index/geo/geo_s2nearComplex.js
index 22fe3112499d6..df978d45e6114 100644
--- a/jstests/core/index/geo/geo_s2nearComplex.js
+++ b/jstests/core/index/geo/geo_s2nearComplex.js
@@ -18,7 +18,7 @@ var atan2 = Math.atan2;
var originGeo = {type: "Point", coordinates: [20.0, 20.0]};
// Center point for all tests.
-var origin = {name: "origin", geo: originGeo};
+let origin = {name: "origin", geo: originGeo};
/*
* Convenience function for checking that coordinates match. threshold let's you
@@ -114,7 +114,7 @@ function uniformPointsWithClusters(
origin, count, minDist, maxDist, numberOfClusters, minClusterSize, maxClusterSize, distRatio) {
distRatio = distRatio || 10;
var points = uniformPoints(origin, count, minDist, maxDist);
- for (j = 0; j < numberOfClusters; j++) {
+ for (let j = 0; j < numberOfClusters; j++) {
var randomPoint = points[Math.floor(random() * points.length)];
var clusterSize = (random() * (maxClusterSize - minClusterSize)) + minClusterSize;
uniformPoints(randomPoint, clusterSize, minDist / distRatio, maxDist / distRatio);
@@ -146,18 +146,18 @@ function validateOrdering(query) {
var near30 = t.find(query).limit(30);
var near40 = t.find(query).limit(40);
- for (i = 0; i < 10; i++) {
+ for (let i = 0; i < 10; i++) {
assert(coordinateEqual(near10[i], near20[i]));
assert(coordinateEqual(near10[i], near30[i]));
assert(coordinateEqual(near10[i], near40[i]));
}
- for (i = 0; i < 20; i++) {
+ for (let i = 0; i < 20; i++) {
assert(coordinateEqual(near20[i], near30[i]));
assert(coordinateEqual(near20[i], near40[i]));
}
- for (i = 0; i < 30; i++) {
+ for (let i = 0; i < 30; i++) {
assert(coordinateEqual(near30[i], near40[i]));
}
}
@@ -281,7 +281,7 @@ origin = {
uniformPoints(origin, 10, 89, 90);
-cur = t.find({geo: {$near: {$geometry: originGeo}}});
+let cur = t.find({geo: {$near: {$geometry: originGeo}}});
assert.eq(cur.itcount(), 10);
cur = t.find({geo: {$near: {$geometry: originGeo}}});
@@ -290,5 +290,5 @@ print("Near search on very distant points:");
print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
.explain("executionStats")
.executionStats.executionTimeMillis);
-pt = cur.next();
+let pt = cur.next();
assert(pt);
diff --git a/jstests/core/index/geo/geo_s2nearcorrect.js b/jstests/core/index/geo/geo_s2nearcorrect.js
index 80ece4223d2c8..b21782140be99 100644
--- a/jstests/core/index/geo/geo_s2nearcorrect.js
+++ b/jstests/core/index/geo/geo_s2nearcorrect.js
@@ -2,17 +2,11 @@
// A geometry may have several covers, one of which is in a search ring and the other of which is
// not. If we see the cover that's not in the search ring, we can't mark the object as 'seen' for
// this ring.
-t = db.geo_s2nearcorrect;
+let t = db.geo_s2nearcorrect;
t.drop();
-longline = {
- "type": "LineString",
- "coordinates": [[0, 0], [179, 89]]
-};
+let longline = {"type": "LineString", "coordinates": [[0, 0], [179, 89]]};
t.insert({geo: longline});
t.createIndex({geo: "2dsphere"});
-origin = {
- "type": "Point",
- "coordinates": [45, 45]
-};
+let origin = {"type": "Point", "coordinates": [45, 45]};
assert.eq(1, t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 20000000}}}).count());
diff --git a/jstests/core/index/geo/geo_s2nongeoarray.js b/jstests/core/index/geo/geo_s2nongeoarray.js
index f5830e8702d60..3e2b0d5eb52cd 100644
--- a/jstests/core/index/geo/geo_s2nongeoarray.js
+++ b/jstests/core/index/geo/geo_s2nongeoarray.js
@@ -1,8 +1,8 @@
// Explode arrays when indexing non-geo fields in 2dsphere, and make sure that
// we find them with queries.
-t = db.geo_s2nongeoarray;
+let t = db.geo_s2nongeoarray;
-oldPoint = [40, 5];
+let oldPoint = [40, 5];
var data = {geo: oldPoint, nonGeo: [123, 456], otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]};
diff --git a/jstests/core/index/geo/geo_s2nonstring.js b/jstests/core/index/geo/geo_s2nonstring.js
index a76a7cd073e5b..b7f9496a3b976 100644
--- a/jstests/core/index/geo/geo_s2nonstring.js
+++ b/jstests/core/index/geo/geo_s2nonstring.js
@@ -1,5 +1,5 @@
// Added to make sure that S2 indexing's string AND non-string keys work.
-t = db.geo_s2nonstring;
+let t = db.geo_s2nonstring;
t.drop();
t.createIndex({geo: '2dsphere', x: 1});
diff --git a/jstests/core/index/geo/geo_s2nopoints.js b/jstests/core/index/geo/geo_s2nopoints.js
index 7b3a3b970a76f..b27c2ea20343f 100644
--- a/jstests/core/index/geo/geo_s2nopoints.js
+++ b/jstests/core/index/geo/geo_s2nopoints.js
@@ -1,5 +1,5 @@
// See SERVER-7794.
-t = db.geo_s2nopoints;
+let t = db.geo_s2nopoints;
t.drop();
t.createIndex({loc: "2dsphere", x: 1});
diff --git a/jstests/core/index/geo/geo_s2oddshapes.js b/jstests/core/index/geo/geo_s2oddshapes.js
index db30d431caa53..e6610dea12a47 100644
--- a/jstests/core/index/geo/geo_s2oddshapes.js
+++ b/jstests/core/index/geo/geo_s2oddshapes.js
@@ -96,7 +96,7 @@ outsidePoint = {
t.insert(insidePoint);
t.insert(outsidePoint);
-smallPoly = {
+let smallPoly = {
type: "Polygon",
coordinates: [[[0.0, -0.01], [0.015, -0.01], [0.015, 0.01], [0.0, 0.01], [0.0, -0.01]]]
};
diff --git a/jstests/core/index/geo/geo_s2twofields.js b/jstests/core/index/geo/geo_s2twofields.js
index 9f769f6897f57..af7f07f64ebc1 100644
--- a/jstests/core/index/geo/geo_s2twofields.js
+++ b/jstests/core/index/geo/geo_s2twofields.js
@@ -33,7 +33,7 @@ for (var i = 0; i < maxPoints; ++i) {
arr.push(
{from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}});
}
-res = t.insert(arr);
+let res = t.insert(arr);
assert.commandWorked(res);
assert.eq(t.count(), maxPoints);
diff --git a/jstests/core/index/geo/geo_s2within.js b/jstests/core/index/geo/geo_s2within.js
index 04915c77dcb80..6950cb3e482e6 100644
--- a/jstests/core/index/geo/geo_s2within.js
+++ b/jstests/core/index/geo/geo_s2within.js
@@ -1,18 +1,15 @@
// Test some cases that might be iffy with $within, mostly related to polygon w/holes.
-t = db.geo_s2within;
+let t = db.geo_s2within;
t.drop();
t.createIndex({geo: "2dsphere"});
-somepoly = {
- "type": "Polygon",
- "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
-};
+let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]};
t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [40.2, 5.2]]}});
// This is only partially contained within the polygon.
t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [42, 7]]}});
-res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
+let res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
t.drop();
diff --git a/jstests/core/index/geo/geo_sort1.js b/jstests/core/index/geo/geo_sort1.js
index 4d32c2f4ae20e..5f90ea50d60a0 100644
--- a/jstests/core/index/geo/geo_sort1.js
+++ b/jstests/core/index/geo/geo_sort1.js
@@ -1,21 +1,21 @@
-t = db.geo_sort1;
+let t = db.geo_sort1;
t.drop();
-for (x = 0; x < 10; x++) {
- for (y = 0; y < 10; y++) {
+for (let x = 0; x < 10; x++) {
+ for (let y = 0; y < 10; y++) {
t.insert({loc: [x, y], foo: x * x * y});
}
}
t.createIndex({loc: "2d", foo: 1});
-q = t.find({loc: {$near: [5, 5]}, foo: {$gt: 20}});
-m = function(z) {
+let q = t.find({loc: {$near: [5, 5]}, foo: {$gt: 20}});
+let m = function(z) {
return z.foo;
};
-a = q.clone().map(m);
-b = q.clone().sort({foo: 1}).map(m);
+let a = q.clone().map(m);
+let b = q.clone().sort({foo: 1}).map(m);
assert.neq(a, b, "A");
a.sort();
diff --git a/jstests/core/index/geo/geo_uniqueDocs.js b/jstests/core/index/geo/geo_uniqueDocs.js
index 9e0a811e7e77e..418f48691ed69 100644
--- a/jstests/core/index/geo/geo_uniqueDocs.js
+++ b/jstests/core/index/geo/geo_uniqueDocs.js
@@ -1,8 +1,8 @@
// Test uniqueDocs option for $within queries and the $geoNear aggregation stage. SERVER-3139
// SERVER-12120 uniqueDocs is deprecated. Server always returns unique documents.
-collName = 'geo_uniqueDocs_test';
-t = db.geo_uniqueDocs_test;
+let collName = 'geo_uniqueDocs_test';
+let t = db.geo_uniqueDocs_test;
t.drop();
assert.commandWorked(t.save({locs: [[0, 2], [3, 4]]}));
@@ -21,7 +21,8 @@ assert.eq(2,
t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis", uniqueDocs: true}})
.toArray()
.length);
-results = t.aggregate([{$geoNear: {near: [0, 0], distanceField: "dis"}}, {$limit: 2}]).toArray();
+let results =
+ t.aggregate([{$geoNear: {near: [0, 0], distanceField: "dis"}}, {$limit: 2}]).toArray();
assert.eq(2, results.length);
assert.close(2, results[0].dis);
assert.close(10, results[1].dis);
diff --git a/jstests/core/index/geo/geo_update1.js b/jstests/core/index/geo/geo_update1.js
index f982966afd769..67b40a66ffb07 100644
--- a/jstests/core/index/geo/geo_update1.js
+++ b/jstests/core/index/geo/geo_update1.js
@@ -3,7 +3,7 @@
// requires_non_retryable_writes,
// ]
-t = db.geo_update1;
+let t = db.geo_update1;
t.drop();
for (var x = 0; x < 10; x++) {
diff --git a/jstests/core/index/geo/geo_update2.js b/jstests/core/index/geo/geo_update2.js
index 280023ffe442d..4073a5ccd9cf4 100644
--- a/jstests/core/index/geo/geo_update2.js
+++ b/jstests/core/index/geo/geo_update2.js
@@ -3,7 +3,7 @@
// requires_non_retryable_writes,
// ]
-t = db.geo_update2;
+let t = db.geo_update2;
t.drop();
for (var x = 0; x < 10; x++) {
diff --git a/jstests/core/index/geo/geo_update_btree.js b/jstests/core/index/geo/geo_update_btree.js
index 7a58362fe18cf..403af8e5f0c95 100644
--- a/jstests/core/index/geo/geo_update_btree.js
+++ b/jstests/core/index/geo/geo_update_btree.js
@@ -27,7 +27,7 @@ var parallelInsert = startParallelShell(
" db.jstests_geo_update_btree.insert(doc);" +
"}");
-for (i = 0; i < 1000; i++) {
+for (let i = 0; i < 1000; i++) {
coll.update({
loc: {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}}
},
diff --git a/jstests/core/index/geo/geo_withinquery.js b/jstests/core/index/geo/geo_withinquery.js
index 13f20c1433fad..c4069d93f4db1 100644
--- a/jstests/core/index/geo/geo_withinquery.js
+++ b/jstests/core/index/geo/geo_withinquery.js
@@ -3,13 +3,13 @@
// ]
// SERVER-7343: allow $within without a geo index.
-t = db.geo_withinquery;
+let t = db.geo_withinquery;
t.drop();
-num = 0;
-for (x = 0; x <= 20; x++) {
- for (y = 0; y <= 20; y++) {
- o = {_id: num++, loc: [x, y]};
+let num = 0;
+for (let x = 0; x <= 20; x++) {
+ for (let y = 0; y <= 20; y++) {
+ let o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
diff --git a/jstests/core/index/geo/geoa.js b/jstests/core/index/geo/geoa.js
index 78cf6c960c944..26bfc10b8cfe3 100644
--- a/jstests/core/index/geo/geoa.js
+++ b/jstests/core/index/geo/geoa.js
@@ -1,4 +1,4 @@
-t = db.geoa;
+let t = db.geoa;
t.drop();
t.save({_id: 1, a: {loc: [5, 5]}});
@@ -7,5 +7,5 @@ t.save({_id: 3, a: {loc: [7, 7]}});
t.createIndex({"a.loc": "2d"});
-cur = t.find({"a.loc": {$near: [6, 6]}});
+let cur = t.find({"a.loc": {$near: [6, 6]}});
assert.eq(2, cur.next()._id, "A1");
diff --git a/jstests/core/index/geo/geoc.js b/jstests/core/index/geo/geoc.js
index 8875cd44614c5..5d8752b95a3e9 100644
--- a/jstests/core/index/geo/geoc.js
+++ b/jstests/core/index/geo/geoc.js
@@ -2,10 +2,10 @@
// requires_getmore,
// ]
-t = db.geoc;
+let t = db.geoc;
t.drop();
-N = 1000;
+let N = 1000;
for (var i = 0; i < N; i++)
t.insert({loc: [100 + Math.random(), 100 + Math.random()], z: 0});
diff --git a/jstests/core/index/geo/geod.js b/jstests/core/index/geo/geod.js
index 8586d64e3981e..27272929f65bb 100644
--- a/jstests/core/index/geo/geod.js
+++ b/jstests/core/index/geo/geod.js
@@ -5,13 +5,13 @@ t.save({loc: [0.5, 0]});
t.createIndex({loc: "2d"});
// do a few geoNears with different maxDistances. The first iteration
// should match no points in the dataset.
-dists = [.49, .51, 1.0];
-for (idx in dists) {
- b = db.geod
- .aggregate([
- {$geoNear: {near: [1, 0], distanceField: "d", maxDistance: dists[idx]}},
- {$limit: 2},
- ])
- .toArray();
+let dists = [.49, .51, 1.0];
+for (let idx in dists) {
+ let b = db.geod
+ .aggregate([
+ {$geoNear: {near: [1, 0], distanceField: "d", maxDistance: dists[idx]}},
+ {$limit: 2},
+ ])
+ .toArray();
assert.eq(b.length, idx, "B" + idx);
}
diff --git a/jstests/core/index/geo/geoe.js b/jstests/core/index/geo/geoe.js
index 84bc34f7a74c4..eac049e057436 100644
--- a/jstests/core/index/geo/geoe.js
+++ b/jstests/core/index/geo/geoe.js
@@ -4,7 +4,7 @@
// the end of the btree and not reverse direction (leaving the rest of
// the search always looking at some random non-matching point).
-t = db.geo_box;
+let t = db.geo_box;
t.drop();
t.insert({"_id": 1, "geo": [33, -11.1]});
@@ -24,7 +24,7 @@ t.insert({"_id": 14, "geo": [-122.289505, 37.695774]});
t.createIndex({geo: "2d"});
-c = t.find({geo: {"$within": {"$box": [[-125.078461, 36.494473], [-120.320648, 38.905199]]}}});
+let c = t.find({geo: {"$within": {"$box": [[-125.078461, 36.494473], [-120.320648, 38.905199]]}}});
assert.eq(11, c.count(), "A1");
c = t.find({geo: {"$within": {"$box": [[-124.078461, 36.494473], [-120.320648, 38.905199]]}}});
diff --git a/jstests/core/index/geo/geof.js b/jstests/core/index/geo/geof.js
index 1f1d9e0cc670b..8dce3f68b6f0d 100644
--- a/jstests/core/index/geo/geof.js
+++ b/jstests/core/index/geo/geof.js
@@ -1,4 +1,4 @@
-t = db.geof;
+let t = db.geof;
t.drop();
// corners (dist ~0.98)
diff --git a/jstests/core/index/geo/geonear_cmd_input_validation.js b/jstests/core/index/geo/geonear_cmd_input_validation.js
index 5b247759db116..7b936c9359a8b 100644
--- a/jstests/core/index/geo/geonear_cmd_input_validation.js
+++ b/jstests/core/index/geo/geonear_cmd_input_validation.js
@@ -10,7 +10,7 @@ t.createIndex({loc: "2dsphere"});
// 2dsphere index with legacy coordinate pair and spherical=false.
var indexTypes = ['2d', '2dsphere'], pointTypes = [{type: 'Point', coordinates: [0, 0]}, [0, 0]],
sphericalOptions = [true, false], optionNames = ['minDistance', 'maxDistance'],
- badNumbers = [-1, undefined, 'foo'];
+ badNumbers = [-1, undefined, 'foo'], unknownArg = 'foo';
indexTypes.forEach(function(indexType) {
t.drop();
@@ -100,6 +100,12 @@ indexTypes.forEach(function(indexType) {
command['distanceMultiplier'] = badNumber;
assert.commandFailed(db.runCommand(command), msg);
});
+
+ // The command should fail when passed an unknown argument.
+ var msg = ("geoNear should've failed with unknown arg " + unknownArg);
+ var command = makeCommand(1);
+ command[unknownArg] = "unknown";
+ assert.commandFailed(db.runCommand(command), msg);
});
});
});
diff --git a/jstests/core/index/geo/geonear_key.js b/jstests/core/index/geo/geonear_key.js
index 1ecce018b081f..6a3cde1c65194 100644
--- a/jstests/core/index/geo/geonear_key.js
+++ b/jstests/core/index/geo/geonear_key.js
@@ -1,11 +1,6 @@
/**
* Tests for the 'key' field accepted by the $geoNear aggregation stage.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-
const coll = db.jstests_geonear_key;
coll.drop();
@@ -97,5 +92,4 @@ assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"},
// -- spherical=false.
// -- The search point is a legacy coordinate pair.
assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.NoQueryExecutionPlans);
-assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.NoQueryExecutionPlans);
-}());
+assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.NoQueryExecutionPlans);
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_collation.js b/jstests/core/index/hashed/hashed_index_collation.js
index ead8b69bbfc3e..d0f92e6978e0f 100644
--- a/jstests/core/index/hashed/hashed_index_collation.js
+++ b/jstests/core/index/hashed/hashed_index_collation.js
@@ -5,11 +5,8 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const coll = db.hashed_index_collation;
coll.drop();
@@ -110,5 +107,4 @@ validateFindCmdOutputAndPlan({
projection: {"a.e": 1, _id: 0},
expectedStages: ["IXSCAN", "FETCH"],
expectedOutput: [{a: {e: 5}}, {a: {e: 5}}]
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_covered_queries.js b/jstests/core/index/hashed/hashed_index_covered_queries.js
index fa7753a89a56e..31e4acf704ac0 100644
--- a/jstests/core/index/hashed/hashed_index_covered_queries.js
+++ b/jstests/core/index/hashed/hashed_index_covered_queries.js
@@ -8,11 +8,8 @@
* assumes_no_implicit_index_creation,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const coll = db.compound_hashed_index;
coll.drop();
@@ -103,5 +100,4 @@ validateCountCmdOutputAndPlan(
// Verify that a count operation with range query on a non-hashed prefix field can use
// COUNT_SCAN.
validateCountCmdOutputAndPlan(
- {filter: {a: {$gt: 25, $lt: 29}}, expectedStages: ["COUNT_SCAN"], expectedOutput: 3});
-})();
+ {filter: {a: {$gt: 25, $lt: 29}}, expectedStages: ["COUNT_SCAN"], expectedOutput: 3});
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_queries.js b/jstests/core/index/hashed/hashed_index_queries.js
index 54044855cc0b0..024cd311a655b 100644
--- a/jstests/core/index/hashed/hashed_index_queries.js
+++ b/jstests/core/index/hashed/hashed_index_queries.js
@@ -5,10 +5,8 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const collNamePrefix = 'hashed_index_queries_';
let collCount = 0;
@@ -132,5 +130,4 @@ validateCountCmdOutputAndPlan({
filter: {a: {$gt: 25, $lt: 29}, b: 0},
expectedOutput: 1,
expectedStages: ["IXSCAN", "FETCH"]
-});
-})();
\ No newline at end of file
+});
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js b/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js
index 79d5c33759f97..ff112e74bb0fa 100644
--- a/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js
+++ b/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js
@@ -7,11 +7,8 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const coll = db.hashed_index_queries_with_logical_operators;
coll.drop();
@@ -151,5 +148,4 @@ validateFindCmdOutputAndPlan({
filter: {a: {$not: {$gt: 12}}, b: 12},
expectedOutput: [{a: 12, b: 12}, {a: null, b: 12}, {b: 12}],
expectedStages: ["IXSCAN"]
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_sort.js b/jstests/core/index/hashed/hashed_index_sort.js
index 189e40a156902..64d53cbdfa8e0 100644
--- a/jstests/core/index/hashed/hashed_index_sort.js
+++ b/jstests/core/index/hashed/hashed_index_sort.js
@@ -6,10 +6,7 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand().
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js";
const coll = db.hashed_index_sort;
coll.drop();
@@ -193,5 +190,4 @@ validateFindCmdOutputAndPlan({
sort: {c: 1},
expectedOutput: [{c: 2}],
expectedStages: ["IXSCAN", "FETCH", "SORT"]
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/index/hashed/hashed_index_with_arrays.js b/jstests/core/index/hashed/hashed_index_with_arrays.js
index 8d6cc157eb6c3..b332d7d10abc4 100644
--- a/jstests/core/index/hashed/hashed_index_with_arrays.js
+++ b/jstests/core/index/hashed/hashed_index_with_arrays.js
@@ -73,4 +73,4 @@ assert.commandFailedWithCode(coll.insert({a: [1], b: 6}), 16766);
// Array insertion allowed when the document doesn't match the partial filter predicate.
assert.commandWorked(coll.insert({a: [1], b: 1}));
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/index/hidden_index.js b/jstests/core/index/hidden_index.js
index 248b3c7b87511..ac140238c469b 100644
--- a/jstests/core/index/hidden_index.js
+++ b/jstests/core/index/hidden_index.js
@@ -15,14 +15,12 @@
* ]
*/
-(function() {
-'use strict';
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers.findByName.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */) &&
@@ -176,4 +174,3 @@ assert(idxSpec.hidden);
assert.commandWorked(coll.unhideIndex("y"));
idxSpec = IndexCatalogHelpers.findByName(coll.getIndexes(), "y");
assert.eq(idxSpec.hidden, undefined);
-})();
diff --git a/jstests/core/index/index1.js b/jstests/core/index/index1.js
index 4c06bfe03fd4f..e3805a6d4e8c3 100644
--- a/jstests/core/index/index1.js
+++ b/jstests/core/index/index1.js
@@ -1,13 +1,10 @@
// @tags: [requires_non_retryable_writes]
-t = db.embeddedIndexTest;
+let t = db.embeddedIndexTest;
t.remove({});
-o = {
- name: "foo",
- z: {a: 17, b: 4}
-};
+let o = {name: "foo", z: {a: 17, b: 4}};
t.save(o);
assert(t.findOne().z.a == 17);
diff --git a/jstests/core/index/index13.js b/jstests/core/index/index13.js
index 97a3a85f7738a..ff2cb55332ef7 100644
--- a/jstests/core/index/index13.js
+++ b/jstests/core/index/index13.js
@@ -21,7 +21,7 @@
// SERVER-3104 implementation, the index constraints become [3,3] on the 'a.b' field _and_ [3,3] on
// the 'a.c' field.
-t = db.jstests_index13;
+let t = db.jstests_index13;
t.drop();
function assertConsistentResults(query) {
@@ -30,16 +30,13 @@ function assertConsistentResults(query) {
}
function assertResults(query) {
- explain = t.find(query).hint(index).explain();
+ let explain = t.find(query).hint(index).explain();
// printjson( explain ); // debug
assertConsistentResults(query);
}
// Cases with single dotted index field names.
-index = {
- 'a.b': 1,
- 'a.c': 1
-};
+let index = {'a.b': 1, 'a.c': 1};
t.createIndex(index);
t.save({a: [{b: 1}, {c: 1}]});
t.save({a: [{b: 1, c: 1}]});
diff --git a/jstests/core/index/index4.js b/jstests/core/index/index4.js
index 179bcdd97267f..d577186213174 100644
--- a/jstests/core/index/index4.js
+++ b/jstests/core/index/index4.js
@@ -6,7 +6,7 @@
// index4.js
-t = db.index4;
+let t = db.index4;
t.drop();
t.save({name: "alleyinsider", instances: [{pool: "prod1"}, {pool: "dev1"}]});
@@ -21,7 +21,7 @@ t.createIndex({"instances.pool": 1});
sleep(10);
-a = t.find({instances: {pool: "prod1"}});
+let a = t.find({instances: {pool: "prod1"}});
assert(a.length() == 1, "len1");
assert(a[0].name == "alleyinsider", "alley");
diff --git a/jstests/core/index/index5.js b/jstests/core/index/index5.js
index 908b433c29985..8175d5bcc545a 100644
--- a/jstests/core/index/index5.js
+++ b/jstests/core/index/index5.js
@@ -4,17 +4,17 @@
function validate() {
assert.eq(2, t.find().count());
- f = t.find().sort({a: 1});
+ let f = t.find().sort({a: 1});
assert.eq(2, t.count());
assert.eq(1, f[0].a);
assert.eq(2, f[1].a);
- r = t.find().sort({a: -1});
+ let r = t.find().sort({a: -1});
assert.eq(2, r.count());
assert.eq(2, r[0].a);
assert.eq(1, r[1].a);
}
-t = db.index5;
+let t = db.index5;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/index/index6.js b/jstests/core/index/index6.js
index 4626e9359eae9..3edefedfc1bc4 100644
--- a/jstests/core/index/index6.js
+++ b/jstests/core/index/index6.js
@@ -1,6 +1,6 @@
// index6.js Test indexes on array subelements.
-r = db.ed.db.index6;
+let r = db.ed.db.index6;
r.drop();
r.save({comments: [{name: "eliot", foo: 1}]});
diff --git a/jstests/core/index/index8.js b/jstests/core/index/index8.js
index 3887906dddc2a..2b62040ca5033 100644
--- a/jstests/core/index/index8.js
+++ b/jstests/core/index/index8.js
@@ -12,14 +12,14 @@
// Test key uniqueness
(function() {
-t = db.jstests_index8;
+let t = db.jstests_index8;
t.drop();
t.createIndex({a: 1});
t.createIndex({b: 1}, true);
t.createIndex({c: 1}, [false, "cIndex"]);
-checkIndexes = function(num) {
+let checkIndexes = function(num) {
const indexes = t.getIndexes();
assert.eq(4, indexes.length);
diff --git a/jstests/core/index/index9.js b/jstests/core/index/index9.js
index 7bf7ec5ac438e..ed7da66285810 100644
--- a/jstests/core/index/index9.js
+++ b/jstests/core/index/index9.js
@@ -2,7 +2,7 @@
// expected. Also, the primary node cannot change because we use the local database in this test.
// @tags: [assumes_no_implicit_collection_creation_after_drop, does_not_support_stepdowns]
-t = db.jstests_index9;
+let t = db.jstests_index9;
t.drop();
assert.commandWorked(db.createCollection("jstests_index9"));
diff --git a/jstests/core/index/index_arr1.js b/jstests/core/index/index_arr1.js
index d6db3e621cd61..749b54543d583 100644
--- a/jstests/core/index/index_arr1.js
+++ b/jstests/core/index/index_arr1.js
@@ -2,7 +2,7 @@
// collection.
// @tags: [assumes_no_implicit_index_creation]
-t = db.index_arr1;
+let t = db.index_arr1;
t.drop();
t.insert({_id: 1, a: 5, b: [{x: 1}]});
diff --git a/jstests/core/index/index_arr2.js b/jstests/core/index/index_arr2.js
index 999508804228a..ae977f5a514c8 100644
--- a/jstests/core/index/index_arr2.js
+++ b/jstests/core/index/index_arr2.js
@@ -1,27 +1,27 @@
// @tags: [assumes_balancer_off, requires_multi_updates, requires_non_retryable_writes]
-NUM = 20;
-M = 5;
+let NUM = 20;
+let M = 5;
-t = db.jstests_arr2;
+let t = db.jstests_arr2;
function test(withIndex) {
t.drop();
// insert a bunch of items to force queries to use the index.
- newObject = {_id: 1, a: [{b: {c: 1}}]};
+ let newObject = {_id: 1, a: [{b: {c: 1}}]};
- now = (new Date()).getTime() / 1000;
- for (created = now - NUM; created <= now; created++) {
+ let now = (new Date()).getTime() / 1000;
+ for (let created = now - NUM; created <= now; created++) {
newObject['created'] = created;
t.insert(newObject);
newObject['_id']++;
}
// change the last M items.
- query = {'created': {'$gte': now - M}};
+ let query = {'created': {'$gte': now - M}};
- Z = t.find(query).count();
+ let Z = t.find(query).count();
if (withIndex) {
// t.createIndex( { 'a.b.c' : 1, 'created' : -1 } )
@@ -35,7 +35,7 @@ function test(withIndex) {
// now see how many were actually updated.
query['a.b.c'] = 0;
- count = t.count(query);
+ let count = t.count(query);
assert.eq(Z, count, "count after withIndex:" + withIndex);
}
diff --git a/jstests/core/index/index_bounds_code.js b/jstests/core/index/index_bounds_code.js
index 0bc80201eb47d..b96fa1f7cfe44 100644
--- a/jstests/core/index/index_bounds_code.js
+++ b/jstests/core/index/index_bounds_code.js
@@ -3,10 +3,7 @@
// assumes_unsharded_collection,
// requires_non_retryable_writes,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js";
const coll = db.index_bounds_code;
coll.drop();
@@ -49,5 +46,4 @@ assert.commandWorked(coll.insert({a: MaxKey}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 0});
-assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0});
-})();
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0});
\ No newline at end of file
diff --git a/jstests/core/index/index_bounds_maxkey.js b/jstests/core/index/index_bounds_maxkey.js
index 1b59340fffe77..ea2fce6f5801b 100644
--- a/jstests/core/index/index_bounds_maxkey.js
+++ b/jstests/core/index/index_bounds_maxkey.js
@@ -3,10 +3,7 @@
// assumes_unsharded_collection,
// requires_non_retryable_writes,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js";
const coll = db.index_bounds_maxkey;
coll.drop();
@@ -33,5 +30,4 @@ assert.commandWorked(coll.insert({}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3});
-assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3});
-})();
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3});
\ No newline at end of file
diff --git a/jstests/core/index/index_bounds_minkey.js b/jstests/core/index/index_bounds_minkey.js
index 78efd322b1212..c22a9bcb4b624 100644
--- a/jstests/core/index/index_bounds_minkey.js
+++ b/jstests/core/index/index_bounds_minkey.js
@@ -3,10 +3,7 @@
// assumes_unsharded_collection,
// requires_non_retryable_writes,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js";
const coll = db.index_bounds_minkey;
coll.drop();
@@ -33,5 +30,4 @@ assert.commandWorked(coll.insert({}));
assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3});
assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3});
assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0});
-assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0});
-})();
+assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0});
\ No newline at end of file
diff --git a/jstests/core/index/index_bounds_number_edge_cases.js b/jstests/core/index/index_bounds_number_edge_cases.js
index 3da4e53e2d0ad..6f08c6632463c 100644
--- a/jstests/core/index/index_bounds_number_edge_cases.js
+++ b/jstests/core/index/index_bounds_number_edge_cases.js
@@ -2,7 +2,7 @@
// should handle numerical extremes
// such as Number.MAX_VALUE and Infinity
-t = db.indexboundsnumberedgecases;
+let t = db.indexboundsnumberedgecases;
t.drop();
diff --git a/jstests/core/index/index_bounds_object.js b/jstests/core/index/index_bounds_object.js
index 255fb55f117dc..79b77f7308a60 100644
--- a/jstests/core/index/index_bounds_object.js
+++ b/jstests/core/index/index_bounds_object.js
@@ -3,10 +3,11 @@
// assumes_unsharded_collection,
// requires_non_retryable_writes,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount.
+import {
+ assertCoveredQueryAndCount,
+ getWinningPlan,
+ isIndexOnly
+} from "jstests/libs/analyze_plan.js";
const coll = db.index_bounds_object;
coll.drop();
@@ -58,5 +59,4 @@ assert.commandWorked(coll.insert({a: []}));
assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$gt: {}}}, proj).explain().queryPlanner)));
assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$gte: {}}}, proj).explain().queryPlanner)));
assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lt: {}}}, proj).explain().queryPlanner)));
-assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lte: {}}}, proj).explain().queryPlanner)));
-})();
+assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lte: {}}}, proj).explain().queryPlanner)));
\ No newline at end of file
diff --git a/jstests/core/index/index_bounds_pipe.js b/jstests/core/index/index_bounds_pipe.js
index f94c5a748fa12..e8ec773fc5af5 100644
--- a/jstests/core/index/index_bounds_pipe.js
+++ b/jstests/core/index/index_bounds_pipe.js
@@ -5,10 +5,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/analyze_plan.js');
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
const collName = 'index_bounds_pipe';
const coll = db.getCollection(collName);
@@ -113,5 +110,4 @@ assertIndexBoundsAndResult({
regex: /^\Q\|\E/,
bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'],
results: [{_id: '\\|'}]
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/index/index_bounds_timestamp.js b/jstests/core/index/index_bounds_timestamp.js
index 1edf62b929eec..ca6ccca9653f2 100644
--- a/jstests/core/index/index_bounds_timestamp.js
+++ b/jstests/core/index/index_bounds_timestamp.js
@@ -5,10 +5,7 @@
// assumes_read_concern_local,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {assertExplainCount, isIndexOnly} from "jstests/libs/analyze_plan.js";
// Setup the test collection.
let coll = db.index_bounds_timestamp;
@@ -137,5 +134,4 @@ plan = coll.explain("executionStats")
.find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0})
.finish();
assert(isIndexOnly(db, plan.queryPlanner.winningPlan),
- "ts $gte, $lte find with project should be a covered query");
-})();
+ "ts $gte, $lte find with project should be a covered query");
\ No newline at end of file
diff --git a/jstests/core/index/index_check2.js b/jstests/core/index/index_check2.js
index e296e3b558af1..c5c05c5e6d5af 100644
--- a/jstests/core/index/index_check2.js
+++ b/jstests/core/index/index_check2.js
@@ -3,11 +3,11 @@
// requires_getmore
// ]
-t = db.index_check2;
+let t = db.index_check2;
t.drop();
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
for (var i = 0; i < 1000; i++) {
var a = [];
@@ -17,15 +17,9 @@ for (var i = 0; i < 1000; i++) {
t.save({num: i, tags: a});
}
-q1 = {
- tags: "tag6"
-};
-q2 = {
- tags: "tag12"
-};
-q3 = {
- tags: {$all: ["tag6", "tag12"]}
-};
+let q1 = {tags: "tag6"};
+let q2 = {tags: "tag12"};
+let q3 = {tags: {$all: ["tag6", "tag12"]}};
assert.eq(120, t.find(q1).itcount(), "q1 a");
assert.eq(120, t.find(q2).itcount(), "q2 a");
@@ -42,9 +36,9 @@ assert(isIxscan(db, getWinningPlan(t.find(q1).explain().queryPlanner)), "e1");
assert(isIxscan(db, getWinningPlan(t.find(q2).explain().queryPlanner)), "e2");
assert(isIxscan(db, getWinningPlan(t.find(q3).explain().queryPlanner)), "e3");
-scanned1 = t.find(q1).explain("executionStats").executionStats.totalKeysExamined;
-scanned2 = t.find(q2).explain("executionStats").executionStats.totalKeysExamined;
-scanned3 = t.find(q3).explain("executionStats").executionStats.totalKeysExamined;
+let scanned1 = t.find(q1).explain("executionStats").executionStats.totalKeysExamined;
+let scanned2 = t.find(q2).explain("executionStats").executionStats.totalKeysExamined;
+let scanned3 = t.find(q3).explain("executionStats").executionStats.totalKeysExamined;
// print( "scanned1: " + scanned1 + " scanned2: " + scanned2 + " scanned3: " + scanned3 );
diff --git a/jstests/core/index/index_check5.js b/jstests/core/index/index_check5.js
index 8921e014fcd05..62f72c0e61282 100644
--- a/jstests/core/index/index_check5.js
+++ b/jstests/core/index/index_check5.js
@@ -1,5 +1,5 @@
-t = db.index_check5;
+let t = db.index_check5;
t.drop();
t.save({
diff --git a/jstests/core/index/index_count_scan.js b/jstests/core/index/index_count_scan.js
new file mode 100644
index 0000000000000..f0870de08a027
--- /dev/null
+++ b/jstests/core/index/index_count_scan.js
@@ -0,0 +1,88 @@
+// Test that an index can be used to accelerate count commands, as well as the $count agg
+// stage.
+//
+// The collection cannot be sharded, since the need for a SHARD_FILTER stage precludes the planner
+// from generating a COUNT_SCAN plan. Further, we do not allow stepdowns, since the code responsible
+// for retrying on interrupt is not prepared to handle aggregation explain.
+// @tags: [
+// assumes_unsharded_collection,
+// does_not_support_stepdowns,
+// # Test may fail with "index already exists".
+// assumes_no_implicit_index_creation,
+// # Explain for the aggregate command cannot run within a multi-document transaction.
+// does_not_support_transactions,
+// ]
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
+
+const coll = db.index_count;
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {a: 1},
+ {a: 1, b: 1},
+ {a: 2},
+ {a: 3},
+ {a: null},
+ {a: [-1, 0]},
+ {a: [4, -3, 5]},
+ {},
+ {a: {b: 4}},
+ {a: []},
+ {a: [[], {}]},
+ {a: {}},
+]));
+
+const runTest = function(indexPattern, indexOption = {}) {
+ assert.commandWorked(coll.createIndex(indexPattern, indexOption));
+
+ assert.eq(5, coll.count({a: {$gt: 0}}));
+ assert.eq(5, coll.find({a: {$gt: 0}}).itcount());
+
+ // Retrieve the query plan from explain, whose shape varies depending on the query and the
+ // engines used (classic/sbe).
+ const getQueryPlan = function(explain) {
+ if (explain.stages) {
+ explain = explain.stages[0].$cursor;
+ }
+ let winningPlan = explain.queryPlanner.winningPlan;
+ return winningPlan.queryPlan ? [winningPlan.queryPlan, winningPlan.slotBasedPlan]
+ : [winningPlan, null];
+ };
+
+ // Verify that this query uses a COUNT_SCAN.
+ const runAndVerify = function(expectedCount, pipeline) {
+ assert.eq(expectedCount, coll.aggregate(pipeline).next().count);
+ let explain = coll.explain().aggregate(pipeline);
+ const [queryPlan, sbePlan] = getQueryPlan(explain);
+ let countScan = getPlanStage(queryPlan, "COUNT_SCAN");
+ assert.neq(null, countScan, explain);
+ if (sbePlan) {
+ assert.eq(true, sbePlan.stages.includes("ixseek"), sbePlan);
+ }
+ };
+
+ runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]);
+ // Run more times to ensure the query is cached.
+ runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]);
+ runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]);
+ // Make sure query is parameterized correctly for count scan index keys.
+ runAndVerify(1, [{$match: {a: 2}}, {$count: "count"}]);
+ if (indexPattern.b) {
+ runAndVerify(1, [{$match: {a: 1, b: 1}}, {$count: "count"}]);
+ }
+ runAndVerify(2, [{$match: {a: {}}}, {$count: "count"}]);
+ runAndVerify(3, [{$match: {a: {$gt: 1}}}, {$count: "count"}]);
+ // Add a $project stage between $match and $count to avoid pushdown.
+ runAndVerify(2, [{$match: {a: 1}}, {$project: {_id: 0, a: 0}}, {$count: "count"}]);
+ if (indexPattern.a) {
+ runAndVerify(12, [{$sort: {a: 1}}, {$count: "count"}]);
+ runAndVerify(12, [{$sort: {a: -1}}, {$count: "count"}]);
+ runAndVerify(12, [{$sort: {a: -1}}, {$group: {_id: null, count: {$sum: 1}}}]);
+ }
+
+ assert.commandWorked(coll.dropIndex(indexPattern));
+};
+
+runTest({a: 1});
+runTest({"$**": 1});
+runTest({"$**": -1, b: -1}, {wildcardProjection: {b: 0}});
\ No newline at end of file
diff --git a/jstests/core/index/index_decimal.js b/jstests/core/index/index_decimal.js
index 01cd343b2a415..ffa50b8077543 100644
--- a/jstests/core/index/index_decimal.js
+++ b/jstests/core/index/index_decimal.js
@@ -6,11 +6,8 @@
// ]
// Test indexing of decimal numbers
-(function() {
-'use strict';
-
// Include helpers for analyzing explain output.
-load('jstests/libs/analyze_plan.js');
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var t = db.decimal_indexing;
t.drop();
@@ -54,5 +51,4 @@ assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(),
'querying for double less than decimal 0.3 should return double 0.3');
assert.eq(t.find({_id: 0}, {_id: 1}).toArray(),
[{_id: NumberDecimal('0E3')}],
- 'querying for zero does not return the correct decimal');
-})();
+ 'querying for zero does not return the correct decimal');
\ No newline at end of file
diff --git a/jstests/core/index/index_diag.js b/jstests/core/index/index_diag.js
index 769e5575bf168..bb9cd4ed3239f 100644
--- a/jstests/core/index/index_diag.js
+++ b/jstests/core/index/index_diag.js
@@ -3,14 +3,14 @@
// ]
load("jstests/libs/fixture_helpers.js");
-t = db.index_diag;
+let t = db.index_diag;
t.drop();
assert.commandWorked(t.createIndex({x: 1}));
-all = [];
-ids = [];
-xs = [];
+let all = [];
+let ids = [];
+let xs = [];
function r(a) {
var n = [];
@@ -19,8 +19,8 @@ function r(a) {
return n;
}
-for (i = 1; i < 4; i++) {
- o = {_id: i, x: -i};
+for (let i = 1; i < 4; i++) {
+ let o = {_id: i, x: -i};
t.insert(o);
all.push(o);
ids.push({_id: i});
diff --git a/jstests/core/index/index_filter_catalog_independent.js b/jstests/core/index/index_filter_catalog_independent.js
index 2c244fdb0707b..bf0992c1ef326 100644
--- a/jstests/core/index/index_filter_catalog_independent.js
+++ b/jstests/core/index/index_filter_catalog_independent.js
@@ -13,10 +13,7 @@
* tenant_migration_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getPlanStages, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js";
const collName = "index_filter_catalog_independent";
const coll = db[collName];
@@ -88,5 +85,4 @@ assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
explain = assert.commandWorked(coll.find({x: 3}).explain());
checkIndexFilterSet(explain, true);
-assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1});
-})();
+assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1});
\ No newline at end of file
diff --git a/jstests/core/index/index_filter_collation.js b/jstests/core/index/index_filter_collation.js
index b1b3edaa3d039..8b6abc11ce4d2 100644
--- a/jstests/core/index/index_filter_collation.js
+++ b/jstests/core/index/index_filter_collation.js
@@ -13,10 +13,7 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
const collName = "index_filter_collation";
const coll = db[collName];
@@ -117,5 +114,4 @@ assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1});
explain = coll.explain().distinct("_id", {x: 3}, {collation: caseInsensitive});
checkIndexFilterSet(explain, true);
-assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1});
-})();
+assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1});
\ No newline at end of file
diff --git a/jstests/core/index/index_filter_commands.js b/jstests/core/index/index_filter_commands.js
index 429a79adea818..0872f264d9ee4 100644
--- a/jstests/core/index/index_filter_commands.js
+++ b/jstests/core/index/index_filter_commands.js
@@ -35,14 +35,23 @@
* does_not_support_stepdowns,
* # The SBE plan cache was first enabled in 6.3.
* requires_fcv_63,
+ * references_foreign_collection,
* ]
*/
-(function() {
-load("jstests/libs/analyze_plan.js");
+import {
+ getPlanCacheKeyFromPipeline,
+ getPlanCacheKeyFromShape,
+ getPlanStage,
+ getWinningPlan,
+ isClusteredIxscan,
+ isCollscan,
+ isIdhack,
+ isIxscan,
+} from "jstests/libs/analyze_plan.js";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.jstests_index_filter_commands;
@@ -187,7 +196,7 @@ assert.eq(null, planCacheEntryForQuery(shape), coll.getPlanCache().list());
// Check details of winning plan in plan cache after setting filter and re-executing query.
assert.eq(1, coll.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
-planAfterSetFilter = planCacheEntryForQuery(shape);
+let planAfterSetFilter = planCacheEntryForQuery(shape);
assert.neq(null, planAfterSetFilter, coll.getPlanCache().list());
// Check 'indexFilterSet' field in plan details
assert.eq(true, planAfterSetFilter.indexFilterSet, planAfterSetFilter);
@@ -441,7 +450,7 @@ if (checkSBEEnabled(db)) {
assert.eq(lookupStage.strategy, "IndexedLoopJoin", explain);
assert.eq(lookupStage.indexName, "foreign_a_1");
- ixscanStage = getPlanStage(explain, "IXSCAN");
+ let ixscanStage = getPlanStage(explain, "IXSCAN");
assert.neq(null, ixscanStage, explain);
assert.eq(ixscanStage.indexName, "main_a_1_c_1", explain);
@@ -474,4 +483,3 @@ if (checkSBEEnabled(db)) {
planCacheEntry = planCacheEntryForPipeline(pipeline);
assert.eq(null, planCacheEntry, coll.getPlanCache().list());
}
-}());
diff --git a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
index bc2dc74c0e1cd..d5a888d2f1c5b 100644
--- a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
+++ b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
@@ -16,14 +16,11 @@
* # Plan cache state is node-local and will not get migrated alongside tenant data.
* tenant_migration_incompatible,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// For testing convenience this variable is made an integer "1" if SBE is fully enabled, because the
// expected amount of plan cache entries differs between the SBE plan cache and the classic one.
@@ -109,4 +106,3 @@ assert.commandWorked(
db.runCommand({planCacheSetFilter: collName, query: {a: 1, b: 1}, indexes: [{a: 1}]}));
assert(!existsInPlanCache({a: 1, b: 1}, {}, {}, coll));
assert(existsInPlanCache({a: 1, b: 1}, {}, {}, collOther));
-})();
diff --git a/jstests/core/index/index_filter_on_hidden_index.js b/jstests/core/index/index_filter_on_hidden_index.js
index 770802b8b3e73..85b71acf33a05 100644
--- a/jstests/core/index/index_filter_on_hidden_index.js
+++ b/jstests/core/index/index_filter_on_hidden_index.js
@@ -20,10 +20,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'getPlanStages' and 'isCollscan'.
+import {getPlanStages, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js";
const collName = 'hidden_indexes_remain_visible_in_index_filters';
db[collName].drop();
@@ -119,5 +116,4 @@ validateIxscanOrCollscanUsed(queryShape, null);
// Unhiding the index should make it able to be used.
assert.commandWorked(coll.unhideIndex("a_1"));
-validateIxscanOrCollscanUsed(queryShape, "a_1");
-})();
+validateIxscanOrCollscanUsed(queryShape, "a_1");
\ No newline at end of file
diff --git a/jstests/core/index/index_id_options.js b/jstests/core/index/index_id_options.js
index ca19627a48e3a..49b2128407a2a 100644
--- a/jstests/core/index/index_id_options.js
+++ b/jstests/core/index/index_id_options.js
@@ -44,7 +44,6 @@ assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", unique: true}));
assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", sparse: false}));
assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}}));
assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600}));
-assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", background: false}));
assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", unknown: true}));
assert.commandWorked(coll.createIndex(
{_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}}));
diff --git a/jstests/core/index/index_many.js b/jstests/core/index/index_many.js
index cdd559c9be5d9..ee1f71477c01d 100644
--- a/jstests/core/index/index_many.js
+++ b/jstests/core/index/index_many.js
@@ -4,7 +4,7 @@
/* test using lots of indexes on one collection */
-t = db.many;
+let t = db.many;
function f() {
t.drop();
@@ -13,10 +13,10 @@ function f() {
t.save({x: 9, y: 99});
t.save({x: 19, y: 99});
- x = 2;
+ let x = 2;
var lastErr = null;
while (x < 70) {
- patt = {};
+ let patt = {};
patt[x] = 1;
if (x == 20)
patt = {x: 1};
@@ -29,7 +29,7 @@ function f() {
assert.commandFailed(lastErr, "should have got an error 'too many indexes'");
// 40 is the limit currently
- lim = t.getIndexes().length;
+ let lim = t.getIndexes().length;
if (lim != 64) {
print("# of indexes should be 64 but is : " + lim);
return;
diff --git a/jstests/core/index/index_multikey.js b/jstests/core/index/index_multikey.js
index 9dc26c9c13bd0..dd7c022f27298 100644
--- a/jstests/core/index/index_multikey.js
+++ b/jstests/core/index/index_multikey.js
@@ -5,11 +5,8 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
// For making assertions about explain output.
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.getCollection("index_multikey");
coll.drop();
@@ -36,5 +33,4 @@ assert.eq(ixscan.isMultiKey,
"index should have been marked as multikey after insert; plan: " + tojson(ixscan));
assert.eq(ixscan.multiKeyPaths,
{a: ["a"], b: []},
- "index has wrong multikey paths after insert; plan: " + ixscan);
-})();
+ "index has wrong multikey paths after insert; plan: " + ixscan);
\ No newline at end of file
diff --git a/jstests/core/index/index_partial_create_drop.js b/jstests/core/index/index_partial_create_drop.js
index c0a095b30df71..c61c5502f54c6 100644
--- a/jstests/core/index/index_partial_create_drop.js
+++ b/jstests/core/index/index_partial_create_drop.js
@@ -13,10 +13,8 @@
// Test partial index creation and drops.
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-(function() {
-"use strict";
var coll = db.index_partial_create_drop;
var getNumKeys = function(idxName) {
@@ -77,8 +75,7 @@ assert.commandWorked(coll.dropIndex({x: 1}));
assert.eq(1, coll.getIndexes().length);
// Create partial index in background.
-assert.commandWorked(
- coll.createIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}}));
+assert.commandWorked(coll.createIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}}));
assert.eq(5, getNumKeys("x_1"));
assert.commandWorked(coll.dropIndex({x: 1}));
assert.eq(1, coll.getIndexes().length);
@@ -120,4 +117,3 @@ numIndexesBefore = coll.getIndexes().length;
assert.commandFailedWithCode(coll.dropIndex({x: 1}), ErrorCodes.AmbiguousIndexKeyPattern);
assert.commandWorked(coll.dropIndex("partialIndex2"));
assert.eq(coll.getIndexes().length, numIndexesBefore - 1);
-})();
diff --git a/jstests/core/index/index_partial_read_ops.js b/jstests/core/index/index_partial_read_ops.js
index 2bc6578479e3b..339134e5e2a09 100644
--- a/jstests/core/index/index_partial_read_ops.js
+++ b/jstests/core/index/index_partial_read_ops.js
@@ -11,11 +11,9 @@
// Read ops tests for partial indexes.
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {getRejectedPlans, getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-(function() {
-"use strict";
let explain;
const coll = db.index_partial_read_ops;
@@ -137,7 +135,7 @@ const coll = db.index_partial_read_ops;
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
jsTest.log(
"Skipping partialFilterExpression testing for $in, $or and non-top level $and as timeseriesMetricIndexesEnabled is false");
- return;
+ quit();
}
(function testFilterWithInExpression() {
@@ -195,4 +193,3 @@ if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
assert.eq(1, explain.executionStats.nReturned);
assert(isCollscan(db, getWinningPlan(explain.queryPlanner)));
})();
-})();
diff --git a/jstests/core/index/index_signature.js b/jstests/core/index/index_signature.js
index 9d015bfa2d042..bc3796ce4e185 100644
--- a/jstests/core/index/index_signature.js
+++ b/jstests/core/index/index_signature.js
@@ -9,11 +9,8 @@
* requires_non_retryable_writes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
-load("jstests/libs/fixture_helpers.js"); // For 'isSharded'.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+load("jstests/libs/fixture_helpers.js"); // For 'isSharded'.
const testDB = db.getSiblingDB(jsTestName());
const coll = testDB.test;
@@ -143,7 +140,7 @@ assert.commandFailedWithCode(coll.createIndex(keyPattern, partialFilterUnsortedL
ErrorCodes.IndexKeySpecsConflict);
// Verifies that non-signature options cannot distinguish a new index from an existing index.
-const nonSignatureOptions = [{expireAfterSeconds: 10}, {background: true}];
+const nonSignatureOptions = [{expireAfterSeconds: 10}];
// Builds a new, basic index on {a: 1}, since some of the options we intend to test are not
// compatible with the partialFilterExpression on the existing {a: 1} indexes.
@@ -249,4 +246,3 @@ if (allowCompoundWildcardIndexes) {
{name: "cwi_a_sub_b_c_1", wildcardProjection: {"a.c": 1, "a.b": 1}}),
ErrorCodes.IndexOptionsConflict);
}
-})();
diff --git a/jstests/core/index/index_sparse1.js b/jstests/core/index/index_sparse1.js
index 58bc5baa9b8a6..0aec94e8ab221 100644
--- a/jstests/core/index/index_sparse1.js
+++ b/jstests/core/index/index_sparse1.js
@@ -2,7 +2,7 @@
// collection.
// @tags: [assumes_no_implicit_index_creation, requires_non_retryable_writes, requires_fastcount]
-t = db.index_sparse1;
+let t = db.index_sparse1;
t.drop();
t.insert({_id: 1, x: 1});
diff --git a/jstests/core/index/index_sparse2.js b/jstests/core/index/index_sparse2.js
index 324b46d82163a..c7a9b0fac0461 100644
--- a/jstests/core/index/index_sparse2.js
+++ b/jstests/core/index/index_sparse2.js
@@ -2,7 +2,7 @@
// collection.
// @tags: [assumes_no_implicit_index_creation, requires_fastcount]
-t = db.index_sparse2;
+let t = db.index_sparse2;
t.drop();
t.insert({_id: 1, x: 1, y: 1});
diff --git a/jstests/core/index/index_stats.js b/jstests/core/index/index_stats.js
index b7e3e64d3242a..c3d13b7d1e103 100644
--- a/jstests/core/index/index_stats.js
+++ b/jstests/core/index/index_stats.js
@@ -16,16 +16,14 @@
// # errors.
// tenant_migration_incompatible,
// # TODO SERVER-67639: Verify $indexStats works for queries that are eligible for CQF.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// # Uses mapReduce command.
// requires_scripting,
+// references_foreign_collection,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
var colName = "jstests_index_stats";
var col = db[colName];
@@ -360,4 +358,3 @@ assert.commandWorked(col.unhideIndex("a_1"));
res = col.findOne({a: 1});
assert(1, res);
assert.eq(1, getUsageCount("a_1"));
-})();
diff --git a/jstests/core/index/index_type_change.js b/jstests/core/index/index_type_change.js
index 455d9b6067d77..294170ad933a4 100644
--- a/jstests/core/index/index_type_change.js
+++ b/jstests/core/index/index_type_change.js
@@ -10,10 +10,7 @@
* will update the index entries associated with that document.
*/
-load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly'.
-
-(function() {
-"use strict";
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var coll = db.index_type_change;
coll.drop();
@@ -40,4 +37,3 @@ assert(isIndexOnly(db, explain));
var updated = coll.findOne({a: 2}, {_id: 0, a: 1});
assert(updated.a instanceof NumberLong, "Index entry did not change type");
-})();
diff --git a/jstests/core/index/indexa.js b/jstests/core/index/indexa.js
index 01fde035621c5..937dde52df180 100644
--- a/jstests/core/index/indexa.js
+++ b/jstests/core/index/indexa.js
@@ -5,7 +5,7 @@
// unique index constraint test for updates
// case where object doesn't grow tested here
-t = db.indexa;
+let t = db.indexa;
t.drop();
t.createIndex({x: 1}, true);
@@ -18,8 +18,8 @@ assert.eq(2, t.count(), "indexa 1");
t.update({x: 'B'}, {x: 'A'});
-a = t.find().toArray();
-u = Array.unique(a.map(function(z) {
+let a = t.find().toArray();
+let u = Array.unique(a.map(function(z) {
return z.x;
}));
assert.eq(2, t.count(), "indexa 2");
diff --git a/jstests/core/index/indexb.js b/jstests/core/index/indexb.js
index 59546f7fae273..5c5177fd23974 100644
--- a/jstests/core/index/indexb.js
+++ b/jstests/core/index/indexb.js
@@ -8,15 +8,13 @@
// see indexa.js for the test case for an update with dup id check
// when it doesn't move
-t = db.indexb;
+let t = db.indexb;
t.drop();
t.createIndex({a: 1}, true);
t.insert({a: 1});
-x = {
- a: 2
-};
+let x = {a: 2};
t.save(x);
{
diff --git a/jstests/core/index/indexc.js b/jstests/core/index/indexc.js
index bf5735380faad..b65d5a6e89123 100644
--- a/jstests/core/index/indexc.js
+++ b/jstests/core/index/indexc.js
@@ -1,5 +1,5 @@
-t = db.indexc;
+let t = db.indexc;
t.drop();
const startMillis = new Date().getTime();
@@ -7,7 +7,7 @@ for (var i = 1; i < 100; i++) {
var d = new Date(startMillis + i);
t.save({a: i, ts: d, cats: [i, i + 1, i + 2]});
if (i == 51)
- mid = d;
+ var mid = d;
}
assert.eq(50, t.find({ts: {$lt: mid}}).itcount(), "A");
diff --git a/jstests/core/index/indexe.js b/jstests/core/index/indexe.js
index a307882adc3aa..aaafc5bda3bb0 100644
--- a/jstests/core/index/indexe.js
+++ b/jstests/core/index/indexe.js
@@ -1,11 +1,11 @@
// @tags: [requires_getmore, requires_fastcount]
-t = db.indexe;
+let t = db.indexe;
t.drop();
var num = 1000;
-for (i = 0; i < num; i++) {
+for (let i = 0; i < num; i++) {
t.insert({a: "b"});
}
diff --git a/jstests/core/index/indexf.js b/jstests/core/index/indexf.js
index 37c279672293d..8837b9e2571d0 100644
--- a/jstests/core/index/indexf.js
+++ b/jstests/core/index/indexf.js
@@ -1,5 +1,5 @@
-t = db.indexf;
+let t = db.indexf;
t.drop();
t.createIndex({x: 1});
diff --git a/jstests/core/index/indexg.js b/jstests/core/index/indexg.js
index 486f4be0ebe9c..47f7a587d9e8a 100644
--- a/jstests/core/index/indexg.js
+++ b/jstests/core/index/indexg.js
@@ -1,13 +1,13 @@
-f = db.jstests_indexg;
+let f = db.jstests_indexg;
f.drop();
f.save({list: [1, 2]});
f.save({list: [1, 3]});
-doit = function() {
+let doit = function() {
assert.eq(1, f.count({list: {$in: [1], $ne: 3}}));
assert.eq(1, f.count({list: {$in: [1], $not: {$in: [3]}}}));
};
doit();
f.createIndex({list: 1});
-doit();
\ No newline at end of file
+doit();
diff --git a/jstests/core/index/indexj.js b/jstests/core/index/indexj.js
index 93034fec923ec..11b70ff2d4c36 100644
--- a/jstests/core/index/indexj.js
+++ b/jstests/core/index/indexj.js
@@ -12,10 +12,8 @@
// requires_fcv_63,
// ]
-(function() {
-"use strict";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
const t = db[jsTestName()];
t.drop();
@@ -93,5 +91,4 @@ assert.commandWorked(t.insert({a: 1, b: 1.5}));
// both sets of bounds being scanned.
expectedKeys = isSBEEnabled ? 1 : 4;
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-assert.eq(numKeys, expectedKeys, errMsg(numKeys));
-})();
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
\ No newline at end of file
diff --git a/jstests/core/index/indexl.js b/jstests/core/index/indexl.js
index cde169eda71dd..d0ba873e4377a 100644
--- a/jstests/core/index/indexl.js
+++ b/jstests/core/index/indexl.js
@@ -1,6 +1,6 @@
// Check nonoverlapping $in/$all with multikeys SERVER-2165
-t = db.jstests_indexl;
+let t = db.jstests_indexl;
function test(t) {
t.save({a: [1, 2]});
@@ -24,4 +24,4 @@ t.drop();
test(t);
t.drop();
t.createIndex({a: 1});
-test(t);
\ No newline at end of file
+test(t);
diff --git a/jstests/core/index/indexm.js b/jstests/core/index/indexm.js
index 7613b12535739..2fc042632733d 100644
--- a/jstests/core/index/indexm.js
+++ b/jstests/core/index/indexm.js
@@ -1,6 +1,6 @@
// Check proper range combinations with or clauses overlapping non or portion of query SERVER-2302
-t = db.jstests_indexm;
+let t = db.jstests_indexm;
t.drop();
t.save({a: [{x: 1}, {x: 2}, {x: 3}, {x: 4}]});
diff --git a/jstests/core/index/indexn.js b/jstests/core/index/indexn.js
index a292ae2e5d310..ddd7dc5169d50 100644
--- a/jstests/core/index/indexn.js
+++ b/jstests/core/index/indexn.js
@@ -4,7 +4,7 @@
// assumes_read_concern_local,
// ]
-t = db.jstests_indexn;
+let t = db.jstests_indexn;
t.drop();
t.save({a: 1, b: [1, 2]});
diff --git a/jstests/core/index/indexr.js b/jstests/core/index/indexr.js
index d3ff5f7e02ebd..a056d29281f12 100644
--- a/jstests/core/index/indexr.js
+++ b/jstests/core/index/indexr.js
@@ -4,7 +4,7 @@
// Check multikey index cases with parallel nested fields SERVER-958.
-t = db.jstests_indexr;
+let t = db.jstests_indexr;
t.drop();
// Check without indexes.
diff --git a/jstests/core/index/indexs.js b/jstests/core/index/indexs.js
index 6ee625071e2de..6cbabbb8f821b 100644
--- a/jstests/core/index/indexs.js
+++ b/jstests/core/index/indexs.js
@@ -1,7 +1,7 @@
// Test index key generation issue with parent and nested fields in same index and array containing
// subobject SERVER-3005.
-t = db.jstests_indexs;
+let t = db.jstests_indexs;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/index/indext.js b/jstests/core/index/indext.js
index 1ac92d6be5d02..13e0ce0fb57f5 100644
--- a/jstests/core/index/indext.js
+++ b/jstests/core/index/indext.js
@@ -1,6 +1,6 @@
// Sparse indexes with arrays SERVER-3216
-t = db.jstests_indext;
+let t = db.jstests_indext;
t.drop();
t.createIndex({'a.b': 1}, {sparse: true});
diff --git a/jstests/core/index/partial_index_optimization.js b/jstests/core/index/partial_index_optimization.js
new file mode 100644
index 0000000000000..0e8fa2ccd1a60
--- /dev/null
+++ b/jstests/core/index/partial_index_optimization.js
@@ -0,0 +1,225 @@
+/**
+ * Tests for classic query optimization with partial indices: do not generate a fetch stage in the
+ * plan if a query predicate is satisfied by the filter expression of the chosen partial index. If
+ * the fetch phase is needed for another reason, make sure that the predicate is not in the fetch
+ * filter.
+ *
+ * @tags: [
+ * # the test conflicts with hidden wildcard indexes
+ * assumes_no_implicit_index_creation,
+ * does_not_support_stepdowns,
+ * multiversion_incompatible,
+ * requires_fcv_70,
+ * ]
+ */
+
+import {
+ assertFetchFilter,
+ assertNoFetchFilter,
+ assertStagesForExplainOfCommand,
+ getWinningPlan,
+ isCollscan,
+} from "jstests/libs/analyze_plan.js";
+
+function flagVal(n) {
+ return (n % 5 > 3) ? true : false;
+}
+
+function stateVal(n) {
+ const states = ["open", "closed", "unknown"];
+ return states[n % 3];
+}
+
+function getDocs(len, start = 0) {
+ return Array.from({length: len}, (_, i) => ({
+ _id: start + i,
+ a: i,
+ b: i + 3,
+ c: [i, i + 5],
+ flag: flagVal(i),
+ state: stateVal(i),
+ array: [{a: i, state: stateVal(i)}, {b: i}]
+ }));
+}
+
+const coll = db.partial_index_opt;
+coll.drop();
+assert.commandWorked(coll.insertMany(getDocs(100)));
+assert.commandWorked(coll.insertMany([
+ {
+ _id: 100,
+ a: 100,
+ state: "open",
+ array: [{a: 100, state: "closed"}, {a: 101, state: "closed"}]
+ },
+ {_id: 101, a: 101, state: "open", array: [{a: 101, state: "open"}]},
+ {_id: 102, a: 102, state: "closed", array: [{a: 102, state: "open"}]}
+]));
+
+const expectedStagesCount = ["COUNT", "COUNT_SCAN"];
+
+assert.commandWorked(coll.createIndex({a: 1}, {"partialFilterExpression": {flag: true}}));
+let cmdObj = {find: coll.getName(), filter: {flag: true, a: 4}, projection: {_id: 0, a: 1}};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// The following plan has a fetch phase because of the projection, but no filter on it.
+cmdObj = {
+ find: coll.getName(),
+ filter: {flag: true, a: 4},
+ projection: {a: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Count command.
+cmdObj = {
+ count: coll.getName(),
+ query: {flag: true, a: 4}
+};
+assertStagesForExplainOfCommand({
+ coll: coll,
+ cmdObj: cmdObj,
+ expectedStages: expectedStagesCount,
+ stagesNotExpected: ["FETCH"]
+});
+
+// Partial index with filter expression with conjunction.
+assert.commandWorked(coll.createIndex(
+ {a: 1}, {name: "a_1_range", "partialFilterExpression": {a: {$gte: 20, $lte: 40}}}));
+cmdObj = {
+ find: coll.getName(),
+ filter: {a: {$gte: 20, $lte: 40}},
+ projection: {_id: 0, a: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+cmdObj = {
+ find: coll.getName(),
+ filter: {a: {$gte: 25, $lte: 30}},
+ projection: {_id: 0, a: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Partial index with compound key.
+assert.commandWorked(coll.createIndex({a: 1, b: 1}, {"partialFilterExpression": {flag: true}}));
+cmdObj = {
+ find: coll.getName(),
+ filter: {a: {$gte: 50}, b: {$in: [55, 57, 59, 62]}, flag: true},
+ projection: {_id: 0, a: 1, b: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Filter expression with conjunction on multiple fields.
+assert.commandWorked(coll.createIndex(
+ {b: 1}, {name: "b_1_state_open", "partialFilterExpression": {state: "open", b: {$gt: 50}}}));
+
+cmdObj = {
+ find: coll.getName(),
+ filter: {state: "open", b: {$gt: 80}},
+ projection: {_id: 0, b: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+cmdObj = {
+ count: coll.getName(),
+ query: {state: "open", b: {$gt: 50}}
+};
+assertStagesForExplainOfCommand({
+ coll: coll,
+ cmdObj: cmdObj,
+ expectedStages: expectedStagesCount,
+ stagesNotExpected: ["FETCH"]
+});
+
+// Index filter expression with $exists.
+assert.commandWorked(coll.createIndex(
+ {a: 1}, {name: "a_1_b_exists", "partialFilterExpression": {b: {$exists: true}}}));
+
+cmdObj = {
+ find: coll.getName(),
+ filter: {a: {$gte: 90}, b: {$exists: true}},
+ projection: {_id: 0, a: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Filter expression in a multi-key index.
+assert.commandWorked(
+ coll.createIndex({c: 1}, {name: "c_1_a", partialFilterExpression: {a: {$lte: 30}}}));
+
+cmdObj = {
+ count: coll.getName(),
+ query: {c: {$lte: 50}, a: {$lte: 30}}
+};
+assertStagesForExplainOfCommand({
+ coll: coll,
+ cmdObj: cmdObj,
+ expectedStages: expectedStagesCount,
+ stagesNotExpected: ["FETCH"]
+});
+
+// The following plan has a fetch phase, but no filter on 'a'.
+cmdObj = {
+ find: coll.getName(),
+ filter: {c: {$lte: 50}, a: {$lte: 30}},
+ projection: {_id: 0, c: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Test that the same filter expression under $elemMatch will not be removed from the fetch filter.
+assert.commandWorked(
+ coll.createIndex({a: 1}, {name: "a_1_state_open", "partialFilterExpression": {state: "open"}}));
+
+let predicate = {
+ a: {$gte: 100},
+ state: "open",
+ array: {$elemMatch: {$and: [{a: {$gte: 100}}, {state: "open"}]}}
+};
+let fetchFilter = {
+ "array": {"$elemMatch": {"$and": [{"a": {"$gte": 100}}, {"state": {"$eq": "open"}}]}}
+};
+assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 1});
+
+// Index on $elemMatch predicate. Test that the index filter predicate is removed from the fetch
+// filter while $elemMatch predicate is preserved.
+assert.commandWorked(coll.createIndex(
+ {"array.a": 1}, {name: "array_a_1_state_open", "partialFilterExpression": {state: "open"}}));
+
+predicate = {
+ state: "open",
+ array: {$elemMatch: {$and: [{a: {$gte: 100}}, {state: "open"}]}}
+};
+fetchFilter = {
+ "array": {"$elemMatch": {"$and": [{"a": {"$gte": 100}}, {"state": {"$eq": "open"}}]}}
+};
+assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 1});
+
+// Test for index filter expression over nested field.
+assert.commandWorked(coll.createIndex(
+ {"array.a": 1},
+ {name: "array_a_1_array_state_open", "partialFilterExpression": {"array.state": "open"}}));
+
+cmdObj = {
+ find: coll.getName(),
+ filter: {$and: [{"array.a": {$gte: 100}}, {"array.state": "open"}]},
+ projection: {_id: 0, array: 1}
+};
+assertNoFetchFilter({coll: coll, cmdObj: cmdObj});
+
+// Tests that the query predicate is not removed if it is a subset of an $or index filter.
+assert.commandWorked(coll.createIndex(
+ {a: 1}, {name: "a_1_or", "partialFilterExpression": {$or: [{b: {$gte: 80}}, {flag: "true"}]}}));
+
+predicate = {
+ $and: [{a: {$gte: 75}}, {b: {$gte: 80}}]
+};
+fetchFilter = {
+ "b": {"$gte": 80}
+};
+assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 23});
+
+// Possible optimization: the following query could use a bounded index scan on 'a' and remove the
+// $or sub-predicate as it is covered by the partial index filter. Currently, the index is not
+// considered and a collection scan is used instead.
+const exp =
+ coll.find({$and: [{a: {$gte: 90}}, {$or: [{b: {$gte: 80}}, {flag: "true"}]}]}).explain();
+assert(isCollscan(db, exp),
+ "Expected collection scan, got " + tojson(getWinningPlan(exp.queryPlanner)));
diff --git a/jstests/core/index/sparse_index_internal_expr.js b/jstests/core/index/sparse_index_internal_expr.js
index 9ac47a5c1006f..2f1c6f58a2064 100644
--- a/jstests/core/index/sparse_index_internal_expr.js
+++ b/jstests/core/index/sparse_index_internal_expr.js
@@ -5,13 +5,11 @@
*
* @tags: [
* multiversion_incompatible,
+ * does_not_support_transactions,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.sparse_index_internal_expr;
coll.drop();
@@ -47,7 +45,8 @@ assert.docEq(res[0], {a: 1});
// Drop the non-sparse index and create a sparse index with the same key pattern.
assert.commandWorked(coll.dropIndex("missing_1"));
-assert.commandWorked(coll.createIndex({'missing': 1}, {'sparse': true}));
+assert.commandWorked(
+ coll.createIndex({'missing': 1}, {'sparse': true, 'name': 'missing_1_sparse'}));
// Run the same query to test that a COLLSCAN plan is used rather than an indexed plan.
const collScans =
@@ -58,13 +57,12 @@ assert.gt(collScans.length, 0, collScans);
// Test that a sparse index can be hinted to answer an $expr query, but incomplete results are returned
// because the document is not indexed by the sparse index.
-res = coll.find(exprQuery, {_id: 0}).hint("missing_1").toArray();
+res = coll.find(exprQuery, {_id: 0}).hint("missing_1_sparse").toArray();
assert.eq(res.length, 0);
ixScans = getPlanStages(
- getWinningPlan(coll.find(exprQuery).hint("missing_1").explain().queryPlanner), "IXSCAN");
+ getWinningPlan(coll.find(exprQuery).hint("missing_1_sparse").explain().queryPlanner), "IXSCAN");
assert.gt(ixScans.length, 0, ixScans);
-assert.eq("missing_1", ixScans[0].indexName, ixScans);
-assert.eq(true, ixScans[0].isSparse, ixScans);
-}());
+assert.eq("missing_1_sparse", ixScans[0].indexName, ixScans);
+assert.eq(true, ixScans[0].isSparse, ixScans);
\ No newline at end of file
diff --git a/jstests/core/index/useindexonobjgtlt.js b/jstests/core/index/useindexonobjgtlt.js
index 7b393de7c2fd7..bcdf36f3279f6 100644
--- a/jstests/core/index/useindexonobjgtlt.js
+++ b/jstests/core/index/useindexonobjgtlt.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.factories;
+let t = db.factories;
t.drop();
t.insert({name: "xyz", metro: {city: "New York", state: "NY"}});
t.createIndex({metro: 1});
@@ -9,4 +9,4 @@ assert(db.factories.find().count());
assert.eq(1, db.factories.find({metro: {city: "New York", state: "NY"}}).hint({metro: 1}).count());
-assert.eq(1, db.factories.find({metro: {$gte: {city: "New York"}}}).hint({metro: 1}).count());
\ No newline at end of file
+assert.eq(1, db.factories.find({metro: {$gte: {city: "New York"}}}).hint({metro: 1}).count());
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_filter.js b/jstests/core/index/wildcard/compound_wildcard_index_filter.js
index c9f2324c116d4..508be239c4d5c 100644
--- a/jstests/core/index/wildcard/compound_wildcard_index_filter.js
+++ b/jstests/core/index/wildcard/compound_wildcard_index_filter.js
@@ -10,10 +10,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/wildcard_index_helpers.js");
+import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js";
/**
* Utility function to find an index filter by keyPattern or index name in the given filterList.
@@ -189,5 +186,4 @@ for (const cwiFilter of cwiFilterList) {
for (const cwiFilter of cwiFilterList) {
assertExpectedIndexAnswersQueryWithFilter(
coll, cwiFilter.query, [cwiFilter.keyPattern], cwiFilter.query, cwiFilter.indexName);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_hiding.js b/jstests/core/index/wildcard/compound_wildcard_index_hiding.js
index 968baeff12308..b5cbfe68c41ab 100644
--- a/jstests/core/index/wildcard/compound_wildcard_index_hiding.js
+++ b/jstests/core/index/wildcard/compound_wildcard_index_hiding.js
@@ -5,16 +5,13 @@
* @tags: [
* not_allowed_with_security_token,
* does_not_support_stepdowns,
+ * does_not_support_transactions,
* featureFlagCompoundWildcardIndexes,
* requires_fcv_70,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/wildcard_index_helpers.js");
+import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js";
const collectionName = "compound_wildcard_index_hiding";
const cwiList = [
@@ -50,17 +47,6 @@ const cwiList = [
},
];
-function validateIndex(coll, indexSpec) {
- const index = findIndex(coll, indexSpec);
- assert.neq(null, index);
-
- if (indexSpec.hidden) {
- assert.eq(true, index.hidden);
- } else {
- assert.neq(true, index.hidden);
- }
-}
-
function setIndexVisibilityByKeyPattern(collectionName, keyPattern, hidden) {
assert.commandWorked(db.runCommand({collMod: collectionName, index: {keyPattern, hidden}}));
}
@@ -168,5 +154,4 @@ testCompoundWildcardIndexesHiding(cwiList, collectionName);
/////////////////////////////////////////////////////////////////////////
// 3. Test that queries do not use hidden Compound Wildcard Indexes.
-assertHiddenIndexesIsNotUsed(cwiList, collectionName);
-})();
+assertHiddenIndexesIsNotUsed(cwiList, collectionName);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_hint.js b/jstests/core/index/wildcard/compound_wildcard_index_hint.js
index 552b385f5bb79..e15059dc6bcfd 100644
--- a/jstests/core/index/wildcard/compound_wildcard_index_hint.js
+++ b/jstests/core/index/wildcard/compound_wildcard_index_hint.js
@@ -7,9 +7,7 @@
* ]
*/
-(function() {
-
-load("jstests/libs/wildcard_index_helpers.js");
+import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js";
const cwiList = [
// Note: 'wildcardProjection' cannot be specified if the wildcard field is not "$**".
@@ -74,5 +72,4 @@ for (const testCase of cwiList) {
const explain = assert.commandWorked(
coll.find(testCase.query).hint(testCase.keyPattern).explain('executionStats'));
WildcardIndexHelpers.assertExpectedIndexIsUsed(explain, testCase.indexName);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_or.js b/jstests/core/index/wildcard/compound_wildcard_index_or.js
index 1eea064888b1c..b479bd76fba39 100644
--- a/jstests/core/index/wildcard/compound_wildcard_index_or.js
+++ b/jstests/core/index/wildcard/compound_wildcard_index_or.js
@@ -11,11 +11,8 @@
* requires_fcv_70,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/wildcard_index_helpers.js"); // For WildcardIndexHelpers.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq().
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
const documentList = [
{
@@ -63,15 +60,15 @@ const idxResult = wild.aggregate(pipeline).toArray();
assertArrayEq({expected: documentList, actual: noIdxResult});
assertArrayEq({expected: noIdxResult, actual: idxResult});
-const explain = assert.commandWorked(wild.explain('executionStats').aggregate(pipeline));
+let explain = assert.commandWorked(wild.explain('executionStats').aggregate(pipeline));
// We want to make sure that the correct expanded CWI key pattern was used. The CWI,
// {"str": -1, "obj.obj.obj.obj.$**": -1}, could be expanded internally to two key patterns:
// 1) {"str": -1, "obj.obj.obj.obj.obj": -1} for predicates including "obj.obj.obj.obj.obj".
// 2) {"str": -1, "$_path": -1} for queries only on the prefix field 'str'.
// The latter key pattern should be used for the predicate with {"str": {$regex: /^Chicken/}}.
-const winningPlan = getWinningPlan(explain.queryPlanner);
-const planStages = getPlanStages(winningPlan, 'IXSCAN');
+let winningPlan = getWinningPlan(explain.queryPlanner);
+let planStages = getPlanStages(winningPlan, 'IXSCAN');
let idxUsedCnt = 0;
for (const stage of planStages) {
@@ -79,10 +76,11 @@ for (const stage of planStages) {
if (stage.indexName === "str_-1_obj.obj.obj.obj.$**_-1") {
idxUsedCnt++;
- // This key pattern should contain "$_path" rather than any specific field.
const expectedKeyPattern = {"str": -1, "$_path": 1};
assert.eq(stage.keyPattern, expectedKeyPattern, stage);
- assert.eq(stage.indexBounds["$_path"], ["[MinKey, MaxKey]"], stage);
+        // The index bounds of "$_path" should always be expanded to "all-value" bounds,
+        // regardless of whether the CWI's key pattern was expanded to a known field or not.
+ assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage);
}
if (stage.indexName === "obj.obj.obj.$**_1") {
idxUsedCnt++;
@@ -99,5 +97,91 @@ for (const stage of planStages) {
assert.eq(stage.indexBounds["obj.obj.obj.obj.obj"], ["[MinKey, MaxKey]"], stage);
}
}
-assert.eq(idxUsedCnt, 2);
-})();
+assert.eq(idxUsedCnt, 2, winningPlan);
+
+// Test that two different CWI can be used to answer a $or query.
+const collTwoCWI = db[jsTestName() + "_wild_2"];
+const docs = [
+ {num: 1, sub: {num: 1, str: 'aa'}, str: '1'},
+ {num: 2, sub: {num: 2, str: 'bb'}, str: '2'},
+ {num: 3, sub: {num: 3, str: 'cc'}, str: '3'},
+];
+collTwoCWI.drop();
+assert.commandWorked(collTwoCWI.insertMany(docs));
+assert.commandWorked(collTwoCWI.createIndexes([{num: 1, "sub.$**": 1}, {"sub.$**": 1, num: 1}]));
+
+explain = assert.commandWorked(
+ collTwoCWI.find({$or: [{num: {$gte: 1}}, {'sub.str': 'aa'}]}).explain("executionStats"));
+winningPlan = getWinningPlan(explain.queryPlanner);
+planStages = getPlanStages(winningPlan, 'IXSCAN');
+
+idxUsedCnt = 0;
+for (const stage of planStages) {
+ assert(stage.hasOwnProperty('indexName'), stage);
+ if (stage.indexName === "sub.$**_1_num_1") {
+ idxUsedCnt++;
+
+ const expectedKeyPattern = {"$_path": 1, "sub.str": 1, "num": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ // The "$_path" field shouldn't be expanded because this CWI is wildcard-field-prefixed.
+ assert.eq(stage.indexBounds["$_path"], ["[\"sub.str\", \"sub.str\"]"], stage);
+ }
+ if (stage.indexName === "num_1_sub.$**_1") {
+ idxUsedCnt++;
+
+        // The CWI used to answer a $or query should be expanded to a generic CWI with the
+        // "$_path" field as the wildcard field.
+ const expectedKeyPattern = {"num": 1, "$_path": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ assert.eq(stage.indexBounds["num"], ["[1.0, inf.0]"], stage);
+ // The CWI used to answer a $or query should be expanded to include all paths and all keys
+ // for the wildcard field.
+ assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage);
+ }
+}
+assert.eq(idxUsedCnt, 2, winningPlan);
+
+collTwoCWI.dropIndexes();
+assert.commandWorked(collTwoCWI.createIndexes([{num: 1, "sub.$**": 1}, {str: 1, "sub.$**": 1}]));
+
+// Test a filter with nested $and under a $or.
+explain = assert.commandWorked(
+ collTwoCWI
+ .find({$or: [{$and: [{num: 1}, {"sub.num": {$gt: 4}}]}, {str: '1', "sub.num": {$lt: 10}}]})
+ .explain("executionStats"));
+winningPlan = getWinningPlan(explain.queryPlanner);
+planStages = getPlanStages(winningPlan, 'IXSCAN');
+
+idxUsedCnt = 0;
+for (const stage of planStages) {
+ assert(stage.hasOwnProperty('indexName'), stage);
+ if (stage.indexName === "num_1_sub.$**_1") {
+ idxUsedCnt++;
+
+ // If the IndexScan stage has a filter on field 'sub.num', then this CWI's key pattern
+ // cannot be overwritten.
+ if (stage.hasOwnProperty("filter") && stage["filter"].hasOwnProperty("sub.num")) {
+ const expectedKeyPattern = {"num": 1, "$_path": 1, "sub.num": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ } else {
+ const expectedKeyPattern = {"num": 1, "$_path": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage);
+ }
+ }
+ if (stage.indexName === "str_1_sub.$**_1") {
+ idxUsedCnt++;
+
+ // If the IndexScan stage has a filter on field 'sub.num', then this CWI's key pattern
+ // cannot be overwritten.
+ if (stage.hasOwnProperty("filter") && stage["filter"].hasOwnProperty("sub.num")) {
+ const expectedKeyPattern = {"num": 1, "$_path": 1, "sub.num": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ } else {
+ const expectedKeyPattern = {"str": 1, "$_path": 1};
+ assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+ assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage);
+ }
+ }
+}
+assert.eq(idxUsedCnt, 2, winningPlan);
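For reference, the two shapes of "$_path" bounds asserted above can be pulled out of any explain output with the analyze_plan.js helpers this test already imports. A minimal sketch, illustrative only and not part of the patch; the helper name pathBoundsForIndex is hypothetical:

import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";

// Return the "$_path" index bounds used by the IXSCAN over 'indexName', or null if that
// index is not part of the winning plan.
function pathBoundsForIndex(coll, query, indexName) {
    const explainRes = assert.commandWorked(coll.find(query).explain("executionStats"));
    const ixscans = getPlanStages(getWinningPlan(explainRes.queryPlanner), "IXSCAN");
    const stage = ixscans.find(s => s.indexName === indexName);
    return stage ? stage.indexBounds["$_path"] : null;
}

// Generic expansion keeps "all-value" bounds on "$_path":
//     ["[MinKey, MinKey]", "[\"\", {})"]
// while an expansion for a concrete wildcard path pins "$_path" to that path, e.g.:
//     ["[\"sub.str\", \"sub.str\"]"]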
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_prefix.js b/jstests/core/index/wildcard/compound_wildcard_index_prefix.js
index 41e98fe74157b..68077ae55f68f 100644
--- a/jstests/core/index/wildcard/compound_wildcard_index_prefix.js
+++ b/jstests/core/index/wildcard/compound_wildcard_index_prefix.js
@@ -9,12 +9,9 @@
* requires_fcv_70,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For getPlanStages().
-load("jstests/libs/wildcard_index_helpers.js"); // For WildcardIndexHelpers.
+load("jstests/aggregation/extras/utils.js"); // For arrayEq().
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js";
const coll = db.query_on_prefix_of_compound_wildcard_index;
@@ -98,5 +95,4 @@ for (const query of supportedQueries) {
const expected = coll.find(query).sort(sortOrder).hint({$natural: 1}).toArray();
assertArrayEq({actual, expected});
}
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js b/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js
new file mode 100644
index 0000000000000..ed7d3399697ab
--- /dev/null
+++ b/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js
@@ -0,0 +1,55 @@
+/**
+ * Tests that unbounded scans over a compound wildcard index, which include multikey metadata
+ * entries, don't cause any errors.
+ *
+ * @tags: [
+ * featureFlagCompoundWildcardIndexes,
+ * requires_fcv_70,
+ * # explain does not support majority read concern
+ * assumes_read_concern_local,
+ * ]
+ */
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+
+const coll = db.compound_wildcard_index_unbounded;
+coll.drop();
+const keyPattern = {
+ a: 1,
+ "$**": 1
+};
+const keyProjection = {
+ wildcardProjection: {a: 0}
+};
+assert.commandWorked(coll.createIndex(keyPattern, keyProjection));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+// Add an array field in order to make wildcard insert a multikey path metadata entry.
+assert.commandWorked(coll.insert({b: [1, 2]}));
+
+const query = {
+ a: {$exists: true}
+};
+const explain = coll.find(query).hint(keyPattern).explain('executionStats');
+const plan = getWinningPlan(explain.queryPlanner);
+const ixscans = getPlanStages(plan, "IXSCAN");
+// Assert that the index scans on $_path are unbounded, so that multikey metadata entries are
+// also included in the scan.
+assert.gt(ixscans.length, 0, explain);
+ixscans.forEach(ixscan => {
+ assert.eq({a: 1, $_path: 1}, ixscan.keyPattern, explain);
+ assert.eq({a: ["[MinKey, MaxKey]"], $_path: ["[MinKey, MinKey]", "[\"\", {})"]},
+ ixscan.indexBounds,
+ explain);
+});
+
+const assertNoIndexCorruption = (executionStats) => {
+    // Arrays also have typeof 'object', so the Array.isArray() branch must be checked first.
+    if (Array.isArray(executionStats)) {
+        executionStats.forEach(stats => assertNoIndexCorruption(stats));
+    } else if (typeof executionStats === 'object') {
+        if ("executionSuccess" in executionStats) {
+            // The execution should succeed rather than report any index corruption.
+            assert.eq(true, executionStats.executionSuccess, explain);
+        }
+        assert.eq(executionStats.nReturned, 1, executionStats);
+    }
+};
+assertNoIndexCorruption(explain.executionStats);
\ No newline at end of file
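As an extra sanity check for the same scenario, the hinted unbounded CWI scan should return exactly the documents that a collection scan returns. A short sketch, illustrative only and not part of the patch, reusing coll, query, and keyPattern from the new test above:

// Compare the hinted index scan against a collection scan over the same predicate.
const viaIndex = coll.find(query).hint(keyPattern).sort({_id: 1}).toArray();
const viaCollScan = coll.find(query).hint({$natural: 1}).sort({_id: 1}).toArray();
assert.eq(viaIndex, viaCollScan);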
diff --git a/jstests/core/index/wildcard/compound_wildcard_sort.js b/jstests/core/index/wildcard/compound_wildcard_sort.js
index a9b8a0b8adb4f..acdab8dfe6c32 100644
--- a/jstests/core/index/wildcard/compound_wildcard_sort.js
+++ b/jstests/core/index/wildcard/compound_wildcard_sort.js
@@ -5,17 +5,19 @@
* # We may choose a different plan if other indexes are created, which would break the test.
* assumes_no_implicit_index_creation,
* assumes_read_concern_local,
+ *   # Some expected query plans require the multi-planner to choose the optimal plan that uses a
+ *   # more efficient (non-generic) CWI. Sharded suites could mislead the multi-planner into
+ *   # choosing a worse CWI because the planner may not run enough trials if some shard does not
+ *   # have enough documents.
+ * assumes_unsharded_collection,
* does_not_support_stepdowns,
* featureFlagCompoundWildcardIndexes,
* requires_fcv_70,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For getWinningPlan(), getPlanStages().
-load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
const coll = db.compound_wildcard_sort;
coll.drop();
@@ -177,22 +179,18 @@ function testIndexesForWildcardField(wildcardField, subFields) {
const valid = getValidKeyPatternPrefixesForSort(keyPattern);
for (const kp of valid) {
- // CWI with regular prefix cannot provide blocking sort for sort orders containing the
- // wildcard field.
- if (!keyPattern.hasOwnProperty('pre')) {
- {
- // Test sort on compound fields + first wildcard field (number).
- const sort = replaceFieldWith(kp, wildcardField, [subFields[0]]);
- const wildFieldPred = {[subFields[0]]: {$lte: 43}};
- runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred});
- }
+ {
+ // Test sort on compound fields + first wildcard field (number).
+ const sort = replaceFieldWith(kp, wildcardField, [subFields[0]]);
+ const wildFieldPred = {[subFields[0]]: {$lte: 43}};
+ runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred});
+ }
- {
- // Test sort on compound fields + second wildcard field (string).
- const sort = replaceFieldWith(kp, wildcardField, [subFields[1]]);
- const wildFieldPred = {[subFields[1]]: {$gt: ""}};
- runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred});
- }
+ {
+ // Test sort on compound fields + second wildcard field (string).
+ const sort = replaceFieldWith(kp, wildcardField, [subFields[1]]);
+ const wildFieldPred = {[subFields[1]]: {$gt: ""}};
+ runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred});
}
{
@@ -217,5 +215,4 @@ function testIndexesForWildcardField(wildcardField, subFields) {
initializeDocs();
testIndexesForWildcardField("wild.$**", ["wild.num1", "wild.str1"]);
-testIndexesForWildcardField("$**", ["num2", "str2"]);
-})();
+testIndexesForWildcardField("$**", ["num2", "str2"]);
\ No newline at end of file
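The guard removed above previously skipped the sort-on-wildcard-field cases for CWIs with a regular (non-wildcard) prefix; they are now exercised for every eligible key pattern. Whether a particular sort still needs a blocking SORT stage can be inspected directly; a sketch along these lines, illustrative only and not part of the patch (the field names mirror the ones this test uses, and the outcome depends on the plan chosen):

import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";

// Check whether the winning plan for a prefix + wildcard-field sort contains a blocking SORT.
const explainRes = coll.find({"wild.num1": {$lte: 43}}).sort({pre: 1, "wild.num1": 1}).explain();
const needsBlockingSort = planHasStage(db, getWinningPlan(explainRes.queryPlanner), "SORT");
jsTestLog("Blocking SORT required: " + needsBlockingSort);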
diff --git a/jstests/core/index/wildcard/wildcard_and_text_indexes.js b/jstests/core/index/wildcard/wildcard_and_text_indexes.js
index c3cb8ef0bff7d..6ea4026a21155 100644
--- a/jstests/core/index/wildcard/wildcard_and_text_indexes.js
+++ b/jstests/core/index/wildcard/wildcard_and_text_indexes.js
@@ -6,13 +6,15 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
-load("jstests/libs/fixture_helpers.js"); // For isMongos.
+import {
+ getWinningPlan,
+ getPlanStages,
+ getRejectedPlans,
+ planHasStage
+} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+load("jstests/libs/fixture_helpers.js"); // For isMongos.
const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
@@ -78,9 +80,6 @@ if (allowCompoundWildcardIndexes) {
assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, {'_fts': 1}, false /* isCompound */);
if (allowCompoundWildcardIndexes) {
- // The expanded CWI key pattern shouldn't have '_fts'. The query is a $and query and 'pre' field
- // is the prefix of the CWI, so it's basically a query on the non-wildcard prefix field of a
- // CWI. The only eligible expanded CWI is with key pattern {"pre": 1, "$_path": 1}.
assertWildcardQuery({_fts: 10, pre: 1}, {'pre': 1, '$_path': 1}, true /* isCompound */);
}
@@ -128,5 +127,4 @@ for (let textIndex of [{'$**': 'text'}, {a: 1, '$**': 'text'}]) {
// Drop the index so that a different text index can be created.
assert.commandWorked(coll.dropIndex("textIndex"));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_and_with_not.js b/jstests/core/index/wildcard/wildcard_and_with_not.js
index 1571ef56c94d6..1e3935e369976 100644
--- a/jstests/core/index/wildcard/wildcard_and_with_not.js
+++ b/jstests/core/index/wildcard/wildcard_and_with_not.js
@@ -12,15 +12,13 @@
* does_not_support_transactions,
* featureFlagCompoundWildcardIndexes,
* requires_fcv_70,
+ * references_foreign_collection,
* ]
*/
-(function() {
-'use strict';
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/analyze_plan.js"); // For getWinningPlan(), getAggPlanStages().
-load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
const documentList = [
{
@@ -130,5 +128,4 @@ testAndMatches(false /* useCollScan */);
// Create a compound wildcard index with obj.date as a suffix (always ineligible).
assert.commandWorked(that.dropIndexes());
assert.commandWorked(that.createIndex({"obj.obj.obj.$**": 1, "obj.date": 1}, {}));
-testAndMatches(true /* useCollScan */);
-})();
+testAndMatches(true /* useCollScan */);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js b/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
index c9ef6bfa50210..fd02c968030ef 100644
--- a/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
+++ b/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
@@ -7,14 +7,22 @@
* @tags: [
* assumes_balancer_off,
* does_not_support_stepdowns,
+ *   # Some expected index bounds require the multi-planner to choose the optimal plan that uses a
+ *   # more efficient (non-generic) CWI. Sharded suites could mislead the multi-planner into
+ *   # choosing a worse CWI because the planner may not run enough trials if some shard does not
+ *   # have enough documents.
+ * assumes_unsharded_collection,
+ * featureFlagCompoundWildcardIndexes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {
+ getPlanStages,
+ getRejectedPlan,
+ getRejectedPlans,
+ getWinningPlan
+} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
// Asserts that the given cursors produce identical result sets.
function assertResultsEq(cursor1, cursor2) {
@@ -35,17 +43,19 @@ const allowCompoundWildcardIndexes =
// Template document which defines the 'schema' of the documents in the test collection.
const templateDoc = {
a: 0,
- b: {c: 0, d: {e: 0}, f: {}}
+ b: {c: 0, d: {e: 0, g: 1}, f: {}, arr: [1]}
};
const pathList = ['a', 'b.c', 'b.d.e', 'b.f'];
// Insert a set of documents into the collection, based on the template document and populated
// with an increasing sequence of values. This is to ensure that the range of values present for
// each field in the dataset is not entirely homogeneous.
-for (let i = 0; i < 10; i++) {
+for (let i = 0; i < 100; i++) {
(function populateDoc(doc, value) {
for (let key in doc) {
- if (typeof doc[key] === 'object')
+ if (Array.isArray(doc[key])) {
+ doc[key].push(value);
+ } else if (typeof doc[key] === 'object')
value = populateDoc(doc[key], value);
else
doc[key] = value++;
@@ -88,34 +98,39 @@ const operationList = [
// In principle we could have tighter bounds for this. See SERVER-36765.
{expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true},
{expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']},
-
];
// Operations for compound wildcard indexes.
const operationListCompound = [
{
query: {'a': 3, 'b.c': {$gte: 3}},
- bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']},
- path: '$_path',
- expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1}
+ bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[3.0, inf.0]'], 'c': ['[MinKey, MaxKey]']},
+ path: 'b.c',
+ subpathBounds: false,
+ expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1}
},
{
query: {'a': 3, 'b.c': {$gte: 3}, 'c': {$lt: 3}},
- bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']},
- path: '$_path',
- expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1}
+ bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[3.0, inf.0]'], 'c': ['[-inf.0, 3.0)']},
+ path: 'b.c',
+ subpathBounds: false,
+ expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1}
},
{
- query: {'a': 3, 'b.c': {$in: [1, 2]}},
- bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']},
- path: '$_path',
+ query: {'a': 3, 'b.c': {$in: [1]}},
+ bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[1.0, 1.0]'], 'c': ['[MinKey, MaxKey]']},
+ path: 'b.c',
subpathBounds: false,
- expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1}
+ expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1}
},
{
query: {'a': 3, 'b.c': {$exists: true}, 'c': {$lt: 3}},
- bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']},
+ bounds: {
+ 'a': ['[3.0, 3.0]'],
+ '$_path': ["[MinKey, MinKey]", "[\"\", {})"],
+ 'c': ['[MinKey, MaxKey]']
+ },
path: '$_path',
subpathBounds: false,
expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1}
@@ -304,14 +319,17 @@ function runCompoundWildcardIndexTest(keyPattern, pathProjection) {
// Verify that the winning plan uses the compound wildcard index with the expected bounds.
assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
// Use "tojson()" in order to make ordering of fields matter.
- assert.docEq(tojson(op.expectedKeyPattern), tojson(ixScans[0].keyPattern));
- assert.docEq(tojson(expectedBounds), tojson(ixScans[0].indexBounds));
+ assert.docEq(tojson(op.expectedKeyPattern), tojson(ixScans[0].keyPattern), explainRes);
+ if (tojson(expectedBounds) != tojson(ixScans[0].indexBounds)) {
+ assert.docEq(expectedBounds, ixScans[0].indexBounds, explainRes);
+ }
// Verify that the results obtained from the compound wildcard index are identical to a
// COLLSCAN. We must explicitly hint the wildcard index, because we also sort on {_id: 1} to
// ensure that both result sets are in the same order.
assertResultsEq(coll.find(op.query).sort({_id: 1}).hint(keyPattern),
- coll.find(op.query).sort({_id: 1}).hint({$natural: 1}));
+ coll.find(op.query).sort({_id: 1}).hint({$natural: 1}),
+ explainRes);
}
}
@@ -338,4 +356,3 @@ runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']);
// Test a compound wildcard index.
runCompoundWildcardIndexTest({'a': 1, 'b.$**': 1, 'c': 1}, null);
runCompoundWildcardIndexTest({'a': 1, '$**': 1, 'c': 1}, {'a': 0, 'c': 0});
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_cached_plans.js b/jstests/core/index/wildcard/wildcard_index_cached_plans.js
index cc19b56a669c9..d9c961815bfbb 100644
--- a/jstests/core/index/wildcard/wildcard_index_cached_plans.js
+++ b/jstests/core/index/wildcard/wildcard_index_cached_plans.js
@@ -20,14 +20,17 @@
* assumes_no_implicit_index_creation,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getPlanStage().
+import {
+ getPlanCacheKeyFromExplain,
+ getPlanCacheKeyFromShape,
+ getPlanStage,
+ getPlanStages,
+ getWinningPlan,
+} from "jstests/libs/analyze_plan.js";
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.wildcard_cached_plans;
@@ -192,4 +195,3 @@ for (const indexSpec of wildcardIndexes) {
getPlanCacheKeyFromExplain(queryUnindexedExplain, db));
}
}
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_collation.js b/jstests/core/index/wildcard/wildcard_index_collation.js
index 29be78dbc3579..e65e4c3327da5 100644
--- a/jstests/core/index/wildcard/wildcard_index_collation.js
+++ b/jstests/core/index/wildcard/wildcard_index_collation.js
@@ -12,12 +12,9 @@
* requires_non_retryable_writes,
* ]
*/
-(function() {
-"user strict";
-
-load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+load("jstests/aggregation/extras/utils.js"); // For arrayEq.
+import {getWinningPlan, getPlanStages, isIndexOnly} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers.
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/fixture_helpers.js"); // For isMongos.
@@ -141,4 +138,3 @@ for (const indexSpec of wildcardIndexes) {
coll = assertDropAndRecreateCollection(
db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}});
}
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_count.js b/jstests/core/index/wildcard/wildcard_index_count.js
index 6be792353bba3..646ccfef4c823 100644
--- a/jstests/core/index/wildcard/wildcard_index_count.js
+++ b/jstests/core/index/wildcard/wildcard_index_count.js
@@ -8,11 +8,8 @@
// assumes_unsharded_collection,
// does_not_support_stepdowns,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getAggPlanStage, getPlanStage, isCollscan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_count;
coll.drop();
@@ -120,4 +117,3 @@ for (const indexSpec of wildcardIndexes) {
assert.commandWorked(coll.dropIndex(indexSpec.keyPattern));
}
-}());
diff --git a/jstests/core/index/wildcard/wildcard_index_covered_queries.js b/jstests/core/index/wildcard/wildcard_index_covered_queries.js
index 7cade95f88fc9..cfb62203e0217 100644
--- a/jstests/core/index/wildcard/wildcard_index_covered_queries.js
+++ b/jstests/core/index/wildcard/wildcard_index_covered_queries.js
@@ -10,12 +10,9 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getWinningPlan, getPlanStages, isIndexOnly} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const assertArrayEq = (l, r) => assert(arrayEq(l, r));
@@ -109,5 +106,4 @@ for (const indexSpec of wildcardIndexes) {
// Verify that predicates which produce inexact-fetch bounds are not covered by a $** index.
assertWildcardProvidesCoveredSolution(
{d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_dedup.js b/jstests/core/index/wildcard/wildcard_index_dedup.js
index da27f6d8f7873..accb038d5c79b 100644
--- a/jstests/core/index/wildcard/wildcard_index_dedup.js
+++ b/jstests/core/index/wildcard/wildcard_index_dedup.js
@@ -9,10 +9,7 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_dedup;
coll.drop();
@@ -50,5 +47,4 @@ if (allowCompoundWildcardIndexes) {
// Test compound wildcard indexes do not return duplicates.
assert.eq(1, coll.find({"a.c": {$exists: true}, post: 1}).hint(compoundKeyPattern).itcount());
assert.eq(1, coll.find({"a.h": {$exists: true}, post: 1}).hint(compoundKeyPattern).itcount());
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_distinct_scan.js b/jstests/core/index/wildcard/wildcard_index_distinct_scan.js
index 6d578a8fae385..407ffdf657e40 100644
--- a/jstests/core/index/wildcard/wildcard_index_distinct_scan.js
+++ b/jstests/core/index/wildcard/wildcard_index_distinct_scan.js
@@ -6,12 +6,9 @@
* no_selinux,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getWinningPlan, getPlanStages, planHasStage} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
@@ -224,5 +221,4 @@ for (let testCase of testCases) {
expectedResults: distinctValues,
expectedPath: null
});
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_dup_predicates.js b/jstests/core/index/wildcard/wildcard_index_dup_predicates.js
index 5ec6e6d1a6f8c..9afe35dd2811e 100644
--- a/jstests/core/index/wildcard/wildcard_index_dup_predicates.js
+++ b/jstests/core/index/wildcard/wildcard_index_dup_predicates.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_dup_predicates;
coll.drop();
@@ -27,9 +24,9 @@ const allowCompoundWildcardIndexes =
// Inserts the given document and runs the given query to confirm that:
// (1) query matches the given document
// (2) the winning plan does a wildcard index scan
-function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, excludeCWI = false) {
+function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match) {
for (const indexSpec of wildcardIndexes) {
- if ((!allowCompoundWildcardIndexes || excludeCWI) && indexSpec.wildcardProjection) {
+ if (!allowCompoundWildcardIndexes && indexSpec.wildcardProjection) {
continue;
}
coll.drop();
@@ -77,12 +74,11 @@ assertExpectedDocAnswersWildcardIndexQuery(
{a: {b: "foo"}}, {$and: [{a: {$gt: {}}}, {a: {$gt: {}}}, {"a.b": "foo"}]}, true);
assertExpectedDocAnswersWildcardIndexQuery(
- {a: {b: "foo"}}, {$and: [{a: {$ne: 3}}, {a: {$ne: 3}}, {"a.b": "foo"}]}, true, true);
+ {a: {b: "foo"}}, {$and: [{a: {$ne: 3}}, {a: {$ne: 3}}, {"a.b": "foo"}]}, true);
assertExpectedDocAnswersWildcardIndexQuery(
{a: {b: "foo"}},
{$and: [{a: {$nin: [3, 4, 5]}}, {a: {$nin: [3, 4, 5]}}, {"a.b": "foo"}]},
- true,
true);
assertExpectedDocAnswersWildcardIndexQuery(
@@ -92,4 +88,3 @@ assertExpectedDocAnswersWildcardIndexQuery(
{a: {b: "foo"}},
{$and: [{a: {$elemMatch: {$gt: {}}}}, {a: {$elemMatch: {$gt: {}}}}, {"a.b": "foo"}]},
false);
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_empty_arrays.js b/jstests/core/index/wildcard/wildcard_index_empty_arrays.js
index 2471ad1d40f42..e3b7e0b5bb2fd 100644
--- a/jstests/core/index/wildcard/wildcard_index_empty_arrays.js
+++ b/jstests/core/index/wildcard/wildcard_index_empty_arrays.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_empty_arrays;
coll.drop();
@@ -57,5 +54,4 @@ for (const indexSpec of wildcardIndexes) {
// $** index matches empty array nested within an array.
assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexSpec.keyPattern).toArray(),
[{a: 2, b: [[]], c: 1, d: 4}]);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js b/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js
index 861ecca09646b..b6b551759b44e 100644
--- a/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js
+++ b/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js
@@ -5,10 +5,7 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_equality_to_empty_obj;
coll.drop();
@@ -100,5 +97,4 @@ for (const indexSpec of wildcardIndexes) {
assert.eq(results,
coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray());
assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray());
-}
-}());
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_filter.js b/jstests/core/index/wildcard/wildcard_index_filter.js
index 879e245c42247..d9badc369a0dc 100644
--- a/jstests/core/index/wildcard/wildcard_index_filter.js
+++ b/jstests/core/index/wildcard/wildcard_index_filter.js
@@ -10,10 +10,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js");
load("jstests/libs/fixture_helpers.js"); // For 'isMongos()'.
const coll = db.wildcard_index_filter;
@@ -111,5 +109,4 @@ const indexAWildcard = {
assert.commandWorked(coll.createIndex(indexAWildcard));
// Filtering on a path specified $** index. Check that the $** is used over other indices.
-assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1");
-})();
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1");
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_hint.js b/jstests/core/index/wildcard/wildcard_index_hint.js
index 91e1cae3c5c02..2665f59b1e5d4 100644
--- a/jstests/core/index/wildcard/wildcard_index_hint.js
+++ b/jstests/core/index/wildcard/wildcard_index_hint.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.wildcard_hint;
coll.drop();
@@ -97,5 +94,4 @@ assertExpectedIndexAnswersQueryWithHint(
// Hint a $** index by name.
assertExpectedIndexAnswersQueryWithHint(
- {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
-})();
+ {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_minmax.js b/jstests/core/index/wildcard/wildcard_index_minmax.js
index f1ee35669ef03..ecd13ecdf9f27 100644
--- a/jstests/core/index/wildcard/wildcard_index_minmax.js
+++ b/jstests/core/index/wildcard/wildcard_index_minmax.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_minmax;
coll.drop();
@@ -103,5 +100,4 @@ for (const indexSpec of wildcardIndexes) {
// $** index does not interfere with valid min/max.
assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(),
[{a: 1, b: 1}, {a: 1, b: 2}]);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_multikey.js b/jstests/core/index/wildcard/wildcard_index_multikey.js
index e91ea7223e4ff..b7d87fb324b2c 100644
--- a/jstests/core/index/wildcard/wildcard_index_multikey.js
+++ b/jstests/core/index/wildcard/wildcard_index_multikey.js
@@ -6,12 +6,9 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r));
@@ -387,4 +384,3 @@ for (const indexSpec of wildcardIndexes) {
assertWildcardQuery({"a.3.4": {$exists: true}}, null, {"executionStats.nReturned": 0});
assertWildcardQuery({"a.3.4.b": {$exists: true}}, null, {"executionStats.nReturned": 0});
}
-}());
diff --git a/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js b/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js
index 7aa328d4e3ece..ac894b968fe20 100644
--- a/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js
+++ b/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js
@@ -3,13 +3,10 @@
// assumes_read_concern_local,
// does_not_support_stepdowns,
// ]
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For arrayEq().
-load("jstests/libs/analyze_plan.js"); // For getPlanStages().
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
-load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection().
// TODO SERVER-68303: Remove the feature flag and update corresponding tests.
const allowCompoundWildcardIndexes =
@@ -114,5 +111,4 @@ if (allowCompoundWildcardIndexes) {
runSortTests(dir, proj, {a: dir, excludedField: dir}, true /* isCompound */);
}
}
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_partial_index.js b/jstests/core/index/wildcard/wildcard_index_partial_index.js
index d07b03e919848..220399f428425 100644
--- a/jstests/core/index/wildcard/wildcard_index_partial_index.js
+++ b/jstests/core/index/wildcard/wildcard_index_partial_index.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_partial_index;
@@ -86,4 +83,3 @@ for (let i = 0; i < 2; ++i) {
// should match the document in the collection (but would fail to match if it incorrectly indexed
// the $eq:null predicate using the wildcard index).
assert.eq(1, coll.find({x: 1, y: null}).itcount());
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_projection.js b/jstests/core/index/wildcard/wildcard_index_projection.js
index b21ffa74afd34..d33a3f6d9bec4 100644
--- a/jstests/core/index/wildcard/wildcard_index_projection.js
+++ b/jstests/core/index/wildcard/wildcard_index_projection.js
@@ -8,11 +8,8 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js"); // For isMongos.
-load("jstests/libs/analyze_plan.js"); // For getRejectedPlan helper to analyze explain() output.
+import {getWinningPlan, getRejectedPlan} from "jstests/libs/analyze_plan.js";
const collName = jsTestName();
const coll = db[collName];
@@ -66,5 +63,4 @@ assert.eq(winningPlan.inputStage.keyPattern, {$_path: 1, _id: 1}, winningPlan.in
// Test that the results are correct.
const hintedResults = coll.find({_id: {$eq: 1}}).hint("$**_1").toArray();
assert.eq(hintedResults.length, 1, hintedResults);
-assert.eq(hintedResults[0]._id, 1, hintedResults);
-})();
+assert.eq(hintedResults[0]._id, 1, hintedResults);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_type.js b/jstests/core/index/wildcard/wildcard_index_type.js
index 205a318bd92e3..966fbac59003c 100644
--- a/jstests/core/index/wildcard/wildcard_index_type.js
+++ b/jstests/core/index/wildcard/wildcard_index_type.js
@@ -5,11 +5,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const coll = db.wildcard_index_type;
coll.drop();
@@ -175,4 +172,3 @@ assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}}
// A $type of 'timestamp' won't match a date value.
assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false);
-})();
diff --git a/jstests/core/index/wildcard/wildcard_index_update.js b/jstests/core/index/wildcard/wildcard_index_update.js
index 9f0353ea7b27f..6964243a26320 100644
--- a/jstests/core/index/wildcard/wildcard_index_update.js
+++ b/jstests/core/index/wildcard/wildcard_index_update.js
@@ -4,13 +4,11 @@
* @tags: [
* requires_fcv_63,
* does_not_support_stepdowns,
+ * uses_full_validation,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const collName = jsTestName();
const coll = db[collName];
@@ -51,5 +49,4 @@ assert.commandWorked(coll.update({_id: 0}, {$set: {"pre": 1}}));
validate();
assert.commandWorked(coll.update({_id: 0}, {$set: {"other": 1}}));
-validate();
-})();
+validate();
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_validindex.js b/jstests/core/index/wildcard/wildcard_index_validindex.js
index 3a694e524eeea..c410fa9d024c3 100644
--- a/jstests/core/index/wildcard/wildcard_index_validindex.js
+++ b/jstests/core/index/wildcard/wildcard_index_validindex.js
@@ -4,14 +4,12 @@
* # Uses index building in background
* requires_background_index,
* does_not_support_stepdowns,
+ * does_not_support_transactions,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/index_catalog_helpers.js"); // For "IndexCatalogHelpers."
load("jstests/libs/collection_drop_recreate.js"); // For "assertDropCollection."
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const kCollectionName = "wildcard_validindex";
const coll = db.getCollection(kCollectionName);
@@ -32,12 +30,6 @@ IndexCatalogHelpers.createIndexAndVerifyWithDrop(coll, {"a.$**": 1}, {name: kInd
IndexCatalogHelpers.createIndexAndVerifyWithDrop(
coll, {"$**": 1}, {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}});
-// Can create a wildcard index with foreground & background construction.
-IndexCatalogHelpers.createIndexAndVerifyWithDrop(
- coll, {"$**": 1}, {background: false, name: kIndexName});
-IndexCatalogHelpers.createIndexAndVerifyWithDrop(
- coll, {"$**": 1}, {background: true, name: kIndexName});
-
// Can create a wildcard index with index level collation.
IndexCatalogHelpers.createIndexAndVerifyWithDrop(
coll, {"$**": 1}, {collation: {locale: "fr"}, name: kIndexName});
@@ -173,5 +165,4 @@ assert.commandFailedWithCode(
40414); // Need to specify 'unique'.
assert.commandFailedWithCode(
db.runCommand({create: clusteredCollName, clusteredIndex: {key: {"$**": 1}, unique: true}}),
- ErrorCodes.InvalidIndexSpecificationOption);
-})();
+ ErrorCodes.InvalidIndexSpecificationOption);
\ No newline at end of file
diff --git a/jstests/core/json1.js b/jstests/core/json1.js
index 731bef9fcdcf6..4f026e7ba32f5 100644
--- a/jstests/core/json1.js
+++ b/jstests/core/json1.js
@@ -1,8 +1,6 @@
-x = {
- quotes: "a\"b",
- nulls: null
-};
+let x = {quotes: "a\"b", nulls: null};
+let y;
eval("y = " + tojson(x));
assert.eq(tojson(x), tojson(y), "A");
assert.eq(typeof (x.nulls), typeof (y.nulls), "B");
@@ -79,4 +77,4 @@ x = {
assert.eq(
JSON.stringify(x),
- '{"data_binary":{"$binary":"VG8gYmUgb3Igbm90IHRvIGJlLi4uIFRoYXQgaXMgdGhlIHF1ZXN0aW9uLg==","$type":"00"},"data_timestamp":{"$timestamp":{"t":987654321,"i":0}},"data_regex":{"$regex":"^acme","$options":"i"},"data_oid":{"$oid":"579a70d9e249393f153b5bc1"},"data_ref":{"$ref":"test","$id":"579a70d9e249393f153b5bc1"},"data_minkey":{"$minKey":1},"data_maxkey":{"$maxKey":1},"data_numberlong":{"$numberLong":"12345"},"data_numberint":5,"data_numberdecimal":{"$numberDecimal":"3.14000000000000"}}');
\ No newline at end of file
+ '{"data_binary":{"$binary":"VG8gYmUgb3Igbm90IHRvIGJlLi4uIFRoYXQgaXMgdGhlIHF1ZXN0aW9uLg==","$type":"00"},"data_timestamp":{"$timestamp":{"t":987654321,"i":0}},"data_regex":{"$regex":"^acme","$options":"i"},"data_oid":{"$oid":"579a70d9e249393f153b5bc1"},"data_ref":{"$ref":"test","$id":"579a70d9e249393f153b5bc1"},"data_minkey":{"$minKey":1},"data_maxkey":{"$maxKey":1},"data_numberlong":{"$numberLong":"12345"},"data_numberint":5,"data_numberdecimal":{"$numberDecimal":"3.14000000000000"}}');
diff --git a/jstests/core/json_schema/json_schema.js b/jstests/core/json_schema/json_schema.js
index c1e063444989f..0b3c01fb5e6b1 100644
--- a/jstests/core/json_schema/json_schema.js
+++ b/jstests/core/json_schema/json_schema.js
@@ -7,11 +7,8 @@
/**
* Tests for JSON Schema document validation.
*/
-(function() {
-"use strict";
-
load("jstests/libs/assert_schema_match.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSBEEnabled = checkSBEEnabled(db);
@@ -343,5 +340,4 @@ if (!isSBEEnabled) {
assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount());
}
assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount());
-assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount());
-}());
+assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount());
\ No newline at end of file
diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js
index 4821c0643f98c..7e95ed729ecfe 100644
--- a/jstests/core/json_schema/misc_validation.js
+++ b/jstests/core/json_schema/misc_validation.js
@@ -19,6 +19,7 @@
* requires_replication,
* # This test depends on hardcoded database name equality.
* tenant_migration_incompatible,
+ * references_foreign_collection,
* ]
*/
(function() {
@@ -38,8 +39,6 @@ assert.commandWorked(testDB.createCollection(testName));
const coll = testDB.getCollection(testName);
coll.drop();
-const isMongos = (testDB.runCommand("hello").msg === "isdbgrid");
-
// Test that $jsonSchema is rejected in an $elemMatch projection.
assert.throws(function() {
coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount();
diff --git a/jstests/core/jssymbol.js b/jstests/core/jssymbol.js
index 714ea48870a2e..1f805feac0c38 100644
--- a/jstests/core/jssymbol.js
+++ b/jstests/core/jssymbol.js
@@ -2,6 +2,8 @@
//
// @tags: [
// no_selinux,
+// # TODO SERVER-77024: enable on sharded passthrough suites once the orphans hook is supported
+// assumes_unsharded_collection,
// ]
(function() {
@@ -16,9 +18,9 @@ assert(db[Symbol.species] != 1);
assert(db[Symbol.toPrimitive] != 1);
// Exercise Symbol.toPrimitive on BSON objects
-col1 = db.jssymbol_col;
+let col1 = db.jssymbol_col;
col1.insert({});
-a = db.getCollection("jssymbol_col").getIndexes()[0];
+let a = db.getCollection("jssymbol_col").getIndexes()[0];
assert(isNaN(+a));
assert(+a.v >= 1);
diff --git a/jstests/core/latch_analyzer.js b/jstests/core/latch_analyzer.js
index 73aa652c6c181..96c20d9c32083 100644
--- a/jstests/core/latch_analyzer.js
+++ b/jstests/core/latch_analyzer.js
@@ -1,7 +1,7 @@
/**
* Verify that the LatchAnalyzer is working to expectations
*
- * @tags: [multiversion_incompatible, no_selinux]
+ * @tags: [multiversion_incompatible, no_selinux, requires_latch_analyzer]
*/
(function() {
diff --git a/jstests/core/loadserverscripts.js b/jstests/core/loadserverscripts.js
index db2e15fb05bbd..4dfed2188ffdf 100644
--- a/jstests/core/loadserverscripts.js
+++ b/jstests/core/loadserverscripts.js
@@ -5,6 +5,9 @@
// uses_parallel_shell,
// # This test has statements that do not support non-local read concern.
// does_not_support_causal_consistency,
+// # DB.prototype.loadServerScripts does not behave as expected in module mode, and the SELinux
+// # test runner loads scripts dynamically.
+// no_selinux
// ]
// Test db.loadServerScripts()
diff --git a/jstests/core/logprocessdetails.js b/jstests/core/logprocessdetails.js
index 4c72f802262f5..fe9c8a99b1c75 100644
--- a/jstests/core/logprocessdetails.js
+++ b/jstests/core/logprocessdetails.js
@@ -14,7 +14,7 @@
* Checks an array for match against regex.
* Returns true if regex matches a string in the array
*/
-doesLogMatchRegex = function(logArray, regex) {
+let doesLogMatchRegex = function(logArray, regex) {
for (var i = (logArray.length - 1); i >= 0; i--) {
var regexInLine = regex.exec(logArray[i]);
if (regexInLine != null) {
@@ -24,7 +24,7 @@ doesLogMatchRegex = function(logArray, regex) {
return false;
};
-doTest = function() {
+let doTest = function() {
var log = db.adminCommand({getLog: 'global'});
// this regex will need to change if output changes
var re = new RegExp(".*conn.*options.*");
diff --git a/jstests/core/mod_overflow.js b/jstests/core/mod_overflow.js
index 77f0a68ef3b74..8481988b4038b 100644
--- a/jstests/core/mod_overflow.js
+++ b/jstests/core/mod_overflow.js
@@ -42,4 +42,4 @@ for (let divisor of [-1.0, NumberInt("-1"), NumberLong("-1"), NumberDecimal("-1"
.aggregate([{$project: {val: 1, modVal: {$mod: ["$val", divisor]}}}, {$sort: {_id: 1}}])
.toArray());
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/mr_single_reduce.js b/jstests/core/mr_single_reduce.js
index 371837e92ce4b..779f2f650e7c2 100644
--- a/jstests/core/mr_single_reduce.js
+++ b/jstests/core/mr_single_reduce.js
@@ -1,5 +1,7 @@
// The test runs commands that are not allowed with security token: mapReduce.
// @tags: [
+// # Step-down can cause mapReduce to fail.
+// does_not_support_stepdowns,
// not_allowed_with_security_token,
// # Uses mapReduce command.
// requires_scripting,
diff --git a/jstests/core/multi.js b/jstests/core/multi.js
index ac961ed7a9c69..58cb327f86f0e 100644
--- a/jstests/core/multi.js
+++ b/jstests/core/multi.js
@@ -1,4 +1,4 @@
-t = db.jstests_multi;
+let t = db.jstests_multi;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/multi2.js b/jstests/core/multi2.js
index 64473e4de987f..cf9dd924d5163 100644
--- a/jstests/core/multi2.js
+++ b/jstests/core/multi2.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.multi2;
+let t = db.multi2;
t.drop();
t.save({x: 1, a: [1]});
diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js
index f9addc7809918..ff9a0b91f2d6b 100644
--- a/jstests/core/notablescan.js
+++ b/jstests/core/notablescan.js
@@ -16,7 +16,7 @@
// tenant_migration_incompatible,
// ]
-t = db.test_notablescan;
+let t = db.test_notablescan;
t.drop();
try {
diff --git a/jstests/core/notablescan_capped.js b/jstests/core/notablescan_capped.js
index cc7405ff88bae..43ac4d43f945f 100644
--- a/jstests/core/notablescan_capped.js
+++ b/jstests/core/notablescan_capped.js
@@ -17,14 +17,14 @@
// tenant_migration_incompatible,
// ]
-t = db.test_notablescan_capped;
+let t = db.test_notablescan_capped;
t.drop();
assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 100}));
try {
assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: true}));
- err = assert.throws(function() {
+ let err = assert.throws(function() {
t.find({a: 1}).tailable(true).next();
});
assert.includes(err.toString(), "tailable");
diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js
index 6c604a9348a36..e3ec59863fa27 100644
--- a/jstests/core/opcounters_write_cmd.js
+++ b/jstests/core/opcounters_write_cmd.js
@@ -4,6 +4,10 @@
// not_allowed_with_security_token,
// uses_multiple_connections,
// assumes_standalone_mongod,
+// # The config fuzzer may run logical session cache refreshes in the background, which modifies
+// # some serverStatus metrics read in this test.
+// does_not_support_config_fuzzer,
+// inspects_command_opcounters,
// does_not_support_repeated_reads,
// ]
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index cd220deae48ba..703d5da8f6bd7 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -27,6 +27,7 @@
(function() {
"use strict";
+load("jstests/libs/fixture_helpers.js");
load("jstests/libs/stats.js");
var name = "operationalLatencyHistogramTest";
@@ -145,8 +146,7 @@ lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
// Reindex (Only standalone mode supports the reIndex command.)
const hello = db.runCommand({hello: 1});
-const isMongos = (hello.msg === "isdbgrid");
-const isStandalone = !isMongos && !hello.hasOwnProperty('setName');
+const isStandalone = !FixtureHelpers.isMongos(db) && !hello.hasOwnProperty('setName');
if (isStandalone) {
assert.commandWorked(testColl.reIndex());
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
diff --git a/jstests/core/optimized_match_explain.js b/jstests/core/optimized_match_explain.js
index bf4893519bdd9..dd5216a79bd7a 100644
--- a/jstests/core/optimized_match_explain.js
+++ b/jstests/core/optimized_match_explain.js
@@ -5,9 +5,7 @@
/**
* Tests that the explain output for $match reflects any optimizations.
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.match_explain;
coll.drop();
@@ -23,5 +21,4 @@ let explain = coll.explain().aggregate(
[{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]);
assert.commandWorked(explain);
-assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}});
-}());
+assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}});
\ No newline at end of file
diff --git a/jstests/core/partialFilterExpression_with_geoWithin.js b/jstests/core/partialFilterExpression_with_geoWithin.js
index f3ce022636b63..4cc244b4e7ad9 100644
--- a/jstests/core/partialFilterExpression_with_geoWithin.js
+++ b/jstests/core/partialFilterExpression_with_geoWithin.js
@@ -1,10 +1,8 @@
// @tags: [requires_non_retryable_writes, requires_fcv_51]
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-(function() {
-"use strict";
const coll = db.partialFilterExpression_with_geoWithin;
coll.drop();
@@ -166,4 +164,3 @@ if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
// inside the limits of our polygon (or in other words, inside the UWS of Manhattan ).
assert.eq(results.length, 1);
}
-})();
diff --git a/jstests/core/query/add_skip_stage_before_fetch.js b/jstests/core/query/add_skip_stage_before_fetch.js
index bef29a795e6e6..4d5255198f358 100644
--- a/jstests/core/query/add_skip_stage_before_fetch.js
+++ b/jstests/core/query/add_skip_stage_before_fetch.js
@@ -8,10 +8,7 @@
// operations_longer_than_stepdown_interval_in_txns,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
const coll = db.add_skip_stage_before_fetch;
@@ -65,5 +62,4 @@ assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan));
explainResult =
coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats");
assert.gte(explainResult.executionStats.totalKeysExamined, 2500);
-assert.eq(explainResult.executionStats.totalDocsExamined, 2500);
-})();
+assert.eq(explainResult.executionStats.totalDocsExamined, 2500);
\ No newline at end of file
diff --git a/jstests/core/query/agg_hint.js b/jstests/core/query/agg_hint.js
index 8bc2748e228fa..071a6d165887e 100644
--- a/jstests/core/query/agg_hint.js
+++ b/jstests/core/query/agg_hint.js
@@ -11,10 +11,7 @@
// where agg execution differs from query. It also includes confirmation that hint works for find
// command against views, which is converted to a hinted aggregation on execution.
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage.
+import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js";
const testDB = db.getSiblingDB("agg_hint");
assert.commandWorked(testDB.dropDatabase());
@@ -261,5 +258,4 @@ confirmCommandUsesIndex({
command: {count: "view", query: {x: 3}},
hintKeyPattern: {_id: 1},
expectedKeyPattern: {_id: 1}
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/query/all/all.js b/jstests/core/query/all/all.js
index a718e2615e8dd..9132c6f7c51ed 100644
--- a/jstests/core/query/all/all.js
+++ b/jstests/core/query/all/all.js
@@ -1,7 +1,7 @@
-t = db.jstests_all;
+let t = db.jstests_all;
t.drop();
-doTest = function() {
+let doTest = function() {
assert.commandWorked(t.save({a: [1, 2, 3]}));
assert.commandWorked(t.save({a: [1, 2, 4]}));
assert.commandWorked(t.save({a: [1, 8, 5]}));
diff --git a/jstests/core/query/all/all2.js b/jstests/core/query/all/all2.js
index b0c6d40cf68ef..738b5f746d001 100644
--- a/jstests/core/query/all/all2.js
+++ b/jstests/core/query/all/all2.js
@@ -1,11 +1,11 @@
-t = db.all2;
+let t = db.all2;
t.drop();
t.save({a: [{x: 1}, {x: 2}]});
t.save({a: [{x: 2}, {x: 3}]});
t.save({a: [{x: 3}, {x: 4}]});
-state = "no index";
+let state = "no index";
function check(n, q, e) {
assert.eq(n, t.find(q).count(), tojson(q) + " " + e + " count " + state);
diff --git a/jstests/core/query/all/all3.js b/jstests/core/query/all/all3.js
index 37cb6c690b24f..ee14e3a1102ef 100644
--- a/jstests/core/query/all/all3.js
+++ b/jstests/core/query/all/all3.js
@@ -2,7 +2,7 @@
// Check that $all matching null is consistent with $in - SERVER-3820
-t = db.jstests_all3;
+let t = db.jstests_all3;
t.drop();
t.save({});
diff --git a/jstests/core/query/all/all4.js b/jstests/core/query/all/all4.js
index eb97928949653..d3486b467ad4a 100644
--- a/jstests/core/query/all/all4.js
+++ b/jstests/core/query/all/all4.js
@@ -1,6 +1,6 @@
// Test $all/$elemMatch with missing field - SERVER-4492
-t = db.jstests_all4;
+let t = db.jstests_all4;
t.drop();
function checkQuery(query, val) {
diff --git a/jstests/core/query/all/all5.js b/jstests/core/query/all/all5.js
index a5faaa1767f0a..ff14671d52480 100644
--- a/jstests/core/query/all/all5.js
+++ b/jstests/core/query/all/all5.js
@@ -1,6 +1,6 @@
// Test $all/$elemMatch/null matching - SERVER-4517
-t = db.jstests_all5;
+let t = db.jstests_all5;
t.drop();
function checkMatch(doc) {
diff --git a/jstests/core/query/and/and.js b/jstests/core/query/and/and.js
index 20fd583c31fa5..c68f8381886a9 100644
--- a/jstests/core/query/and/and.js
+++ b/jstests/core/query/and/and.js
@@ -4,7 +4,7 @@
// requires_scripting
// ]
-t = db.jstests_and;
+let t = db.jstests_and;
t.drop();
t.save({a: [1, 2]});
diff --git a/jstests/core/query/and/and2.js b/jstests/core/query/and/and2.js
index 5a946c2cb76dc..79d4fc590c4fb 100644
--- a/jstests/core/query/and/and2.js
+++ b/jstests/core/query/and/and2.js
@@ -5,7 +5,7 @@
// Test dollar sign operator with $and SERVER-1089
-t = db.jstests_and2;
+let t = db.jstests_and2;
t.drop();
t.save({a: [1, 2]});
diff --git a/jstests/core/query/and/and3.js b/jstests/core/query/and/and3.js
index 5256237bc33d6..1dfecc87ffa51 100644
--- a/jstests/core/query/and/and3.js
+++ b/jstests/core/query/and/and3.js
@@ -6,7 +6,7 @@
// assumes_read_concern_local,
// ]
-t = db.jstests_and3;
+let t = db.jstests_and3;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/and/andor.js b/jstests/core/query/and/andor.js
index 5bac12d83ca19..b8ae2df86a906 100644
--- a/jstests/core/query/and/andor.js
+++ b/jstests/core/query/and/andor.js
@@ -1,6 +1,6 @@
// SERVER-1089 Test and/or/nor nesting
-t = db.jstests_andor;
+let t = db.jstests_andor;
t.drop();
// not ok
@@ -10,7 +10,7 @@ function ok(q) {
t.save({a: 1});
-test = function() {
+let test = function() {
ok({a: 1});
ok({$and: [{a: 1}]});
diff --git a/jstests/core/query/array/array3.js b/jstests/core/query/array/array3.js
index 42acdfb6d3e54..16d03b880ed76 100644
--- a/jstests/core/query/array/array3.js
+++ b/jstests/core/query/array/array3.js
@@ -2,6 +2,6 @@
assert.eq(5, Array.sum([1, 4]), "A");
assert.eq(2.5, Array.avg([1, 4]), "B");
-arr = [2, 4, 4, 4, 5, 5, 7, 9];
+let arr = [2, 4, 4, 4, 5, 5, 7, 9];
assert.eq(5, Array.avg(arr), "C");
assert.eq(2, Array.stdDev(arr), "D");
diff --git a/jstests/core/query/array/array_match1.js b/jstests/core/query/array/array_match1.js
index 0c56e8d4c3484..e21d0a9e7c986 100644
--- a/jstests/core/query/array/array_match1.js
+++ b/jstests/core/query/array/array_match1.js
@@ -1,4 +1,4 @@
-t = db.array_match1;
+let t = db.array_match1;
t.drop();
t.insert({_id: 1, a: [5, 5]});
diff --git a/jstests/core/query/array/array_match2.js b/jstests/core/query/array/array_match2.js
index 3e0dde8f5f5dc..fa4034d5c3c9e 100644
--- a/jstests/core/query/array/array_match2.js
+++ b/jstests/core/query/array/array_match2.js
@@ -1,6 +1,6 @@
// @tags: [requires_non_retryable_writes]
-t = db.jstests_array_match2;
+let t = db.jstests_array_match2;
t.drop();
t.save({a: [{1: 4}, 5]});
diff --git a/jstests/core/query/array/array_match3.js b/jstests/core/query/array/array_match3.js
index 4990bdd90fde6..ad362e5e43bc1 100644
--- a/jstests/core/query/array/array_match3.js
+++ b/jstests/core/query/array/array_match3.js
@@ -1,6 +1,6 @@
// SERVER-2902 Test indexing of numerically referenced array elements.
-t = db.jstests_array_match3;
+let t = db.jstests_array_match3;
t.drop();
// Test matching numerically referenced array element.
diff --git a/jstests/core/query/array/arrayfind1.js b/jstests/core/query/array/arrayfind1.js
index 4e9330549ae7b..b50ecae78097e 100644
--- a/jstests/core/query/array/arrayfind1.js
+++ b/jstests/core/query/array/arrayfind1.js
@@ -2,7 +2,7 @@
// requires_fastcount,
// ]
-t = db.arrayfind1;
+let t = db.arrayfind1;
t.drop();
t.save({a: [{x: 1}]});
diff --git a/jstests/core/query/array/arrayfind2.js b/jstests/core/query/array/arrayfind2.js
index 38de844f6f412..14eb2411e7c70 100644
--- a/jstests/core/query/array/arrayfind2.js
+++ b/jstests/core/query/array/arrayfind2.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.arrayfind2;
+let t = db.arrayfind2;
t.drop();
function go(prefix) {
diff --git a/jstests/core/query/array/arrayfind3.js b/jstests/core/query/array/arrayfind3.js
index 6dba0bf625ff0..619ed2e1046e4 100644
--- a/jstests/core/query/array/arrayfind3.js
+++ b/jstests/core/query/array/arrayfind3.js
@@ -1,4 +1,4 @@
-t = db.arrayfind3;
+let t = db.arrayfind3;
t.drop();
t.save({a: [1, 2]});
diff --git a/jstests/core/query/array/arrayfind4.js b/jstests/core/query/array/arrayfind4.js
index 2d7c0e0366844..231abee50dd77 100644
--- a/jstests/core/query/array/arrayfind4.js
+++ b/jstests/core/query/array/arrayfind4.js
@@ -4,7 +4,7 @@
// Test querying an empty array SERVER-2258
-t = db.jstests_arrayfind4;
+let t = db.jstests_arrayfind4;
t.drop();
t.save({a: []});
diff --git a/jstests/core/query/array/arrayfind5.js b/jstests/core/query/array/arrayfind5.js
index 004231e08939e..6238284f89a2a 100644
--- a/jstests/core/query/array/arrayfind5.js
+++ b/jstests/core/query/array/arrayfind5.js
@@ -4,7 +4,7 @@
// cqf_incompatible,
// ]
-t = db.jstests_arrayfind5;
+let t = db.jstests_arrayfind5;
t.drop();
function check(nullElemMatch) {
diff --git a/jstests/core/query/array/arrayfind6.js b/jstests/core/query/array/arrayfind6.js
index bd91859c9dc4e..2788fa6d07e5d 100644
--- a/jstests/core/query/array/arrayfind6.js
+++ b/jstests/core/query/array/arrayfind6.js
@@ -1,6 +1,6 @@
// Check index bound determination for $not:$elemMatch queries. SERVER-5740
-t = db.jstests_arrayfind6;
+let t = db.jstests_arrayfind6;
t.drop();
t.save({a: [{b: 1, c: 2}]});
diff --git a/jstests/core/query/array/arrayfind7.js b/jstests/core/query/array/arrayfind7.js
index be2061c5c0d92..2d3fe47179716 100644
--- a/jstests/core/query/array/arrayfind7.js
+++ b/jstests/core/query/array/arrayfind7.js
@@ -1,6 +1,6 @@
// Nested $elemMatch clauses. SERVER-5741
-t = db.jstests_arrayfind7;
+let t = db.jstests_arrayfind7;
t.drop();
t.save({a: [{b: [{c: 1, d: 2}]}]});
diff --git a/jstests/core/query/array/arrayfind9.js b/jstests/core/query/array/arrayfind9.js
index 5406ae8c1d1d3..88a781e999950 100644
--- a/jstests/core/query/array/arrayfind9.js
+++ b/jstests/core/query/array/arrayfind9.js
@@ -1,6 +1,6 @@
// Assorted $elemMatch behavior checks.
-t = db.jstests_arrayfind9;
+let t = db.jstests_arrayfind9;
t.drop();
// Top level field $elemMatch:$not matching
@@ -27,8 +27,8 @@ t.drop();
t.save({a: [{b: [0, 2]}]});
t.createIndex({a: 1});
t.createIndex({'a.b': 1});
-plans = [{$natural: 1}, {a: 1}, {'a.b': 1}];
-for (i in plans) {
- p = plans[i];
+let plans = [{$natural: 1}, {a: 1}, {'a.b': 1}];
+for (let i in plans) {
+ let p = plans[i];
assert.eq(1, t.find({a: {$elemMatch: {b: {$gte: 1, $lte: 1}}}}).hint(p).itcount());
}
diff --git a/jstests/core/query/array/arrayfinda.js b/jstests/core/query/array/arrayfinda.js
index 163af3d8d29ea..f79c7bf7a379c 100644
--- a/jstests/core/query/array/arrayfinda.js
+++ b/jstests/core/query/array/arrayfinda.js
@@ -1,6 +1,6 @@
// Assorted $elemMatch matching behavior checks.
-t = db.jstests_arrayfinda;
+let t = db.jstests_arrayfinda;
t.drop();
// $elemMatch only matches elements within arrays (a descriptive, not a normative test).
diff --git a/jstests/core/query/awaitdata_getmore_cmd.js b/jstests/core/query/awaitdata_getmore_cmd.js
index 5efcf6590efac..2deaa5a518984 100644
--- a/jstests/core/query/awaitdata_getmore_cmd.js
+++ b/jstests/core/query/awaitdata_getmore_cmd.js
@@ -19,39 +19,48 @@
load("jstests/libs/fixture_helpers.js");
load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
-var cmdRes;
-var cursorId;
-var defaultBatchSize = 101;
-var collName = 'await_data';
-var coll = db[collName];
+let collName = 'await_data_non_capped';
+let coll = db[collName];
// Create a non-capped collection with 10 documents.
+jsTestLog('Create a non-capped collection with 10 documents.');
coll.drop();
-for (var i = 0; i < 10; i++) {
- assert.commandWorked(coll.insert({a: i}));
+let docs = [];
+for (let i = 0; i < 10; i++) {
+ docs.push({a: i});
}
+assert.commandWorked(coll.insert(docs));
// Find with tailable flag set should fail for a non-capped collection.
-cmdRes = db.runCommand({find: collName, tailable: true});
+jsTestLog('Find with tailable flag set should fail for a non-capped collection.');
+let cmdRes = db.runCommand({find: collName, tailable: true});
assert.commandFailed(cmdRes);
// Should also fail in the non-capped case if both the tailable and awaitData flags are set.
+jsTestLog(
+ 'Should also fail in the non-capped case if both the tailable and awaitData flags are set.');
cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true});
assert.commandFailed(cmdRes);
// With a non-existent collection, should succeed but return no data and a closed cursor.
+jsTestLog('With a non-existent collection, should succeed but return no data and a closed cursor.');
+collName = 'await_data_missing';
+coll = db[collName];
coll.drop();
cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true}));
assert.eq(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.firstBatch.length, 0);
// Create a capped collection with 10 documents.
+jsTestLog('Create a capped collection with 10 documents.');
+collName = 'await_data'; // collection name must match parallel shell task.
+coll = db[collName];
+coll.drop();
assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048}));
-for (var i = 0; i < 10; i++) {
- assert.commandWorked(coll.insert({a: i}));
-}
+assert.commandWorked(coll.insert(docs));
// GetMore should succeed if query has awaitData but no maxTimeMS is supplied.
+jsTestLog('getMore should succeed if query has awaitData but no maxTimeMS is supplied.');
cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
@@ -63,6 +72,7 @@ assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
// Should also succeed if maxTimeMS is supplied on the original find.
+jsTestLog('Should also succeed if maxTimeMS is supplied on the original find.');
const sixtyMinutes = 60 * 60 * 1000;
cmdRes = db.runCommand(
{find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes});
@@ -76,6 +86,7 @@ assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
// Check that we can set up a tailable cursor over the capped collection.
+jsTestLog('Check that we can set up a tailable cursor over the capped collection.');
cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
@@ -84,6 +95,8 @@ assert.eq(cmdRes.cursor.firstBatch.length, 5);
// Check that tailing the capped collection with awaitData eventually ends up returning an empty
// batch after hitting the timeout.
+jsTestLog('Check that tailing the capped collection with awaitData eventually ends up returning ' +
+ 'an empty batch after hitting the timeout.');
cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
@@ -91,6 +104,7 @@ assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 2);
// Issue getMore until we get an empty batch of results.
+jsTestLog('Issue getMore until we get an empty batch of results.');
cmdRes = db.runCommand({
getMore: cmdRes.cursor.id,
collection: coll.getName(),
@@ -102,8 +116,10 @@ assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
// Keep issuing getMore until we get an empty batch after the timeout expires.
+jsTestLog('Keep issuing getMore until we get an empty batch after the timeout expires.');
+let now;
while (cmdRes.cursor.nextBatch.length > 0) {
- var now = new Date();
+ now = new Date();
cmdRes = db.runCommand({
getMore: cmdRes.cursor.id,
collection: coll.getName(),
@@ -111,6 +127,7 @@ while (cmdRes.cursor.nextBatch.length > 0) {
maxTimeMS: 4000
});
assert.commandWorked(cmdRes);
+ jsTestLog('capped collection tailing cursor getMore: ' + now + ': ' + tojson(cmdRes));
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
}
@@ -118,38 +135,55 @@ assert.gte((new Date()) - now, 2000);
// Repeat the test, this time tailing the oplog rather than a user-created capped collection.
// Oplog tailing is not possible on mongos.
+jsTestLog(
+ 'Repeat the test, this time tailing the oplog rather than a user-created capped collection.');
if (FixtureHelpers.isReplSet(db)) {
- var localDB = db.getSiblingDB("local");
- var oplogColl = localDB.oplog.rs;
+ const localDB = db.getSiblingDB("local");
+ const oplogColl = localDB.oplog.rs;
- cmdRes = localDB.runCommand(
- {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true});
+ jsTestLog('Check that tailing the oplog with awaitData eventually ends up returning ' +
+ 'an empty batch after hitting the timeout.');
+ cmdRes = localDB.runCommand({
+ find: oplogColl.getName(),
+ batchSize: 2,
+ awaitData: true,
+ tailable: true,
+ filter: {ns: {$ne: "config.system.sessions"}}
+ });
assert.commandWorked(cmdRes);
+ jsTestLog('Oplog tailing result: ' + tojson(cmdRes));
if (cmdRes.cursor.id > NumberLong(0)) {
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 2);
+ jsTestLog('Issue getMore on the oplog until we get an empty batch of results.');
cmdRes = localDB.runCommand(
{getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
- while (cmdRes.cursor.nextBatch.length > 0) {
+ jsTestLog('Keep issuing getMore on the oplog until we get an empty batch after the ' +
+ 'timeout expires.');
+ assert.soon(() => {
now = new Date();
cmdRes = localDB.runCommand(
{getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000});
assert.commandWorked(cmdRes);
+ jsTestLog('oplog tailing cursor getMore: ' + now + ': ' + tojson(cmdRes));
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
- }
+ return cmdRes.cursor.nextBatch.length == 0;
+ });
assert.gte((new Date()) - now, 2000);
}
}
-let originalCmdLogLevel = assert.commandWorked(db.setLogLevel(5, 'command')).was.command.verbosity;
-let originalQueryLogLevel = assert.commandWorked(db.setLogLevel(5, 'query')).was.query.verbosity;
+const originalCmdLogLevel =
+ assert.commandWorked(db.setLogLevel(5, 'command')).was.command.verbosity;
+const originalQueryLogLevel = assert.commandWorked(db.setLogLevel(5, 'query')).was.query.verbosity;
+jsTestLog('Test filtered inserts while writing to a capped collection.');
try {
// Test filtered inserts while writing to a capped collection.
// Find with a filter which doesn't match any documents in the collection.
diff --git a/jstests/core/query/basic1.js b/jstests/core/query/basic1.js
index cc2917fb5a32c..6ed22a1422cea 100644
--- a/jstests/core/query/basic1.js
+++ b/jstests/core/query/basic1.js
@@ -1,11 +1,9 @@
// @tags: [does_not_support_stepdowns]
-t = db.getCollection("basic1");
+let t = db.getCollection("basic1");
t.drop();
-o = {
- a: 1
-};
+let o = {a: 1};
t.save(o);
assert.eq(1, t.findOne().a, "first");
diff --git a/jstests/core/query/basic2.js b/jstests/core/query/basic2.js
index 3500d9fbdb5eb..d4501e3474fb0 100644
--- a/jstests/core/query/basic2.js
+++ b/jstests/core/query/basic2.js
@@ -3,12 +3,10 @@
// requires_non_retryable_writes,
// ]
-t = db.getCollection("basic2");
+let t = db.getCollection("basic2");
t.drop();
-o = {
- n: 2
-};
+let o = {n: 2};
t.save(o);
assert.eq(1, t.find().count());
diff --git a/jstests/core/query/basic4.js b/jstests/core/query/basic4.js
index 4b2cf6f96be7a..e94c5868c80f5 100644
--- a/jstests/core/query/basic4.js
+++ b/jstests/core/query/basic4.js
@@ -1,4 +1,4 @@
-t = db.getCollection("basic4");
+let t = db.getCollection("basic4");
t.drop();
t.save({a: 1, b: 1.0});
diff --git a/jstests/core/query/basic5.js b/jstests/core/query/basic5.js
index 7ec41ef7872e0..e1a0df35bbbeb 100644
--- a/jstests/core/query/basic5.js
+++ b/jstests/core/query/basic5.js
@@ -1,4 +1,4 @@
-t = db.getCollection("basic5");
+let t = db.getCollection("basic5");
t.drop();
t.save({a: 1, b: [1, 2, 3]});
diff --git a/jstests/core/query/basic6.js b/jstests/core/query/basic6.js
index 89aef4acc2e44..fb8d85b2f794e 100644
--- a/jstests/core/query/basic6.js
+++ b/jstests/core/query/basic6.js
@@ -4,7 +4,7 @@
* ]
*/
-t = db.basic6;
+let t = db.basic6;
t.findOne();
t.a.findOne();
diff --git a/jstests/core/query/basic7.js b/jstests/core/query/basic7.js
index dc6f18cc95bd6..334033803555f 100644
--- a/jstests/core/query/basic7.js
+++ b/jstests/core/query/basic7.js
@@ -1,5 +1,5 @@
-t = db.basic7;
+let t = db.basic7;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/basic8.js b/jstests/core/query/basic8.js
index 4a35de6963b3f..58ab069fa3bb7 100644
--- a/jstests/core/query/basic8.js
+++ b/jstests/core/query/basic8.js
@@ -1,10 +1,10 @@
// @tags: [requires_fastcount]
-t = db.basic8;
+let t = db.basic8;
t.drop();
t.save({a: 1});
-o = t.findOne();
+let o = t.findOne();
o.b = 2;
t.save(o);
diff --git a/jstests/core/query/basica.js b/jstests/core/query/basica.js
index 1fe8b7c5de4f3..569b41ee90d10 100644
--- a/jstests/core/query/basica.js
+++ b/jstests/core/query/basica.js
@@ -1,11 +1,11 @@
-t = db.basica;
+let t = db.basica;
t.drop();
t.save({a: 1, b: [{x: 2, y: 2}, {x: 3, y: 3}]});
-x = t.findOne();
+let x = t.findOne();
x.b["0"].x = 4;
x.b["0"].z = 4;
x.b[0].m = 9;
diff --git a/jstests/core/query/bittest.js b/jstests/core/query/bittest.js
index a4a7272ae28fe..77aa3f46cab60 100644
--- a/jstests/core/query/bittest.js
+++ b/jstests/core/query/bittest.js
@@ -4,10 +4,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js";
var coll = db.jstests_bitwise;
@@ -217,5 +214,4 @@ assertQueryCorrect({
},
4);
-assert(coll.drop());
-})();
+assert(coll.drop());
\ No newline at end of file
diff --git a/jstests/core/query/count/count3.js b/jstests/core/query/count/count3.js
index d93df020f0b0d..51fb5d5d4c84b 100644
--- a/jstests/core/query/count/count3.js
+++ b/jstests/core/query/count/count3.js
@@ -1,5 +1,5 @@
-t = db.count3;
+let t = db.count3;
t.drop();
diff --git a/jstests/core/query/count/count5.js b/jstests/core/query/count/count5.js
index ceedf62b33625..81fd8786bfc91 100644
--- a/jstests/core/query/count/count5.js
+++ b/jstests/core/query/count/count5.js
@@ -1,15 +1,13 @@
// @tags: [requires_fastcount]
-t = db.count5;
+let t = db.count5;
t.drop();
-for (i = 0; i < 100; i++) {
+for (let i = 0; i < 100; i++) {
t.save({x: i});
}
-q = {
- x: {$gt: 25, $lte: 75}
-};
+let q = {x: {$gt: 25, $lte: 75}};
assert.eq(50, t.find(q).count(), "A");
assert.eq(50, t.find(q).itcount(), "B");
diff --git a/jstests/core/query/count/count6.js b/jstests/core/query/count/count6.js
index 78735c89c2659..8814b2b76a0fa 100644
--- a/jstests/core/query/count/count6.js
+++ b/jstests/core/query/count/count6.js
@@ -2,7 +2,7 @@
//
// @tags: [requires_fastcount]
-t = db.jstests_count6;
+let t = db.jstests_count6;
function checkCountForObject(obj) {
t.drop();
diff --git a/jstests/core/query/count/count7.js b/jstests/core/query/count/count7.js
index 443134474a868..cef47b46c2c04 100644
--- a/jstests/core/query/count/count7.js
+++ b/jstests/core/query/count/count7.js
@@ -2,7 +2,7 @@
// Check normal count matching and deduping.
-t = db.jstests_count7;
+let t = db.jstests_count7;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/count/count9.js b/jstests/core/query/count/count9.js
index 062f099e513de..250ada94cb1dd 100644
--- a/jstests/core/query/count/count9.js
+++ b/jstests/core/query/count/count9.js
@@ -1,6 +1,6 @@
// Test fast mode count with multikey entries.
-t = db.jstests_count9;
+let t = db.jstests_count9;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/count/countb.js b/jstests/core/query/count/countb.js
index 869825b25a1c2..cbce10c7e4e4b 100644
--- a/jstests/core/query/count/countb.js
+++ b/jstests/core/query/count/countb.js
@@ -4,7 +4,7 @@
// requires_scripting,
// ]
-t = db.jstests_countb;
+let t = db.jstests_countb;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/covered_multikey.js b/jstests/core/query/covered_multikey.js
index 9270a2b2c20b0..19bfd54c4f6b3 100644
--- a/jstests/core/query/covered_multikey.js
+++ b/jstests/core/query/covered_multikey.js
@@ -8,11 +8,8 @@
/**
* Test covering behavior for queries over a multikey index.
*/
-(function() {
-"use strict";
-
// For making assertions about explain output.
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js";
let coll = db.covered_multikey;
coll.drop();
@@ -116,5 +113,4 @@ explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish();
winningPlan = getWinningPlan(explainRes.queryPlanner);
ixscanStage = getPlanStage(winningPlan, "IXSCAN");
assert.neq(null, ixscanStage);
-assert.eq(true, ixscanStage.isMultiKey);
-}());
+assert.eq(true, ixscanStage.isMultiKey);
\ No newline at end of file
diff --git a/jstests/core/query/cursor/aggregation_accepts_write_concern.js b/jstests/core/query/cursor/aggregation_accepts_write_concern.js
index 2c764414a1d7b..8117c296e03d5 100644
--- a/jstests/core/query/cursor/aggregation_accepts_write_concern.js
+++ b/jstests/core/query/cursor/aggregation_accepts_write_concern.js
@@ -1,7 +1,11 @@
/**
* Confirms that the aggregate command accepts writeConcern regardless of whether the pipeline
* writes or is read-only.
- * @tags: [assumes_write_concern_unchanged, does_not_support_stepdowns]
+ * @tags: [
+ * assumes_write_concern_unchanged,
+ * does_not_support_stepdowns,
+ * references_foreign_collection
+ * ]
*/
(function() {
"use strict";
diff --git a/jstests/core/query/cursor/cursor1.js b/jstests/core/query/cursor/cursor1.js
index 1cb00cc82fb0d..458787bf6d0c6 100644
--- a/jstests/core/query/cursor/cursor1.js
+++ b/jstests/core/query/cursor/cursor1.js
@@ -1,13 +1,13 @@
// @tags: [requires_getmore, requires_fastcount]
-t = db.cursor1;
+let t = db.cursor1;
t.drop();
-big = "";
+let big = "";
while (big.length < 50000)
big += "asdasdasdasdsdsdadsasdasdasD";
-num = Math.ceil(10000000 / big.length);
+let num = Math.ceil(10000000 / big.length);
for (var i = 0; i < num; i++) {
t.save({num: i, str: big});
diff --git a/jstests/core/query/cursor/cursor3.js b/jstests/core/query/cursor/cursor3.js
index 8e5672d4b3b27..acf1339c345b7 100644
--- a/jstests/core/query/cursor/cursor3.js
+++ b/jstests/core/query/cursor/cursor3.js
@@ -4,19 +4,19 @@
// assumes_read_concern_local,
// ]
-testNum = 1;
+let testNum = 1;
function checkResults(expected, cursor, testNum) {
assert.eq(expected.length,
cursor.count(),
"testNum: " + testNum + " A : " + tojson(cursor.toArray()) + " " +
tojson(cursor.explain()));
- for (i = 0; i < expected.length; ++i) {
+ for (let i = 0; i < expected.length; ++i) {
assert.eq(expected[i], cursor[i]["a"], "testNum: " + testNum + " B");
}
}
-t = db.cursor3;
+let t = db.cursor3;
t.drop();
t.save({a: 0});
diff --git a/jstests/core/query/cursor/cursor4.js b/jstests/core/query/cursor/cursor4.js
index d0440d329f1bf..32688ef7c558b 100644
--- a/jstests/core/query/cursor/cursor4.js
+++ b/jstests/core/query/cursor/cursor4.js
@@ -2,21 +2,21 @@
function checkResults(expected, cursor) {
assert.eq(expected.length, cursor.count());
- for (i = 0; i < expected.length; ++i) {
+ for (let i = 0; i < expected.length; ++i) {
assert.eq(expected[i].a, cursor[i].a);
assert.eq(expected[i].b, cursor[i].b);
}
}
function testConstrainedFindMultiFieldSorting(db) {
- r = db.ed_db_cursor4_cfmfs;
+ let r = db.ed_db_cursor4_cfmfs;
r.drop();
- entries = [{a: 0, b: 0}, {a: 0, b: 1}, {a: 1, b: 1}, {a: 1, b: 1}, {a: 2, b: 0}];
- for (i = 0; i < entries.length; ++i)
+ let entries = [{a: 0, b: 0}, {a: 0, b: 1}, {a: 1, b: 1}, {a: 1, b: 1}, {a: 2, b: 0}];
+ for (let i = 0; i < entries.length; ++i)
r.save(entries[i]);
r.createIndex({a: 1, b: 1});
- reverseEntries = entries.slice();
+ let reverseEntries = entries.slice();
reverseEntries.reverse();
checkResults(entries.slice(2, 4), r.find({a: 1, b: 1}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
diff --git a/jstests/core/query/cursor/cursor5.js b/jstests/core/query/cursor/cursor5.js
index aab03473255ee..eab5e5c5cc527 100644
--- a/jstests/core/query/cursor/cursor5.js
+++ b/jstests/core/query/cursor/cursor5.js
@@ -2,7 +2,7 @@
function checkResults(expected, cursor) {
assert.eq(expected.length, cursor.count());
- for (i = 0; i < expected.length; ++i) {
+ for (let i = 0; i < expected.length; ++i) {
assert.eq(expected[i].a.b, cursor[i].a.b);
assert.eq(expected[i].a.c, cursor[i].a.c);
assert.eq(expected[i].a.d, cursor[i].a.d);
@@ -11,10 +11,10 @@ function checkResults(expected, cursor) {
}
function testBoundsWithSubobjectIndexes(db) {
- r = db.ed_db_cursor5_bwsi;
+ let r = db.ed_db_cursor5_bwsi;
r.drop();
- z = [
+ let z = [
{a: {b: 1, c: 2, d: 3}, e: 4},
{a: {b: 1, c: 2, d: 3}, e: 5},
{a: {b: 1, c: 2, d: 4}, e: 4},
@@ -22,11 +22,11 @@ function testBoundsWithSubobjectIndexes(db) {
{a: {b: 2, c: 2, d: 3}, e: 4},
{a: {b: 2, c: 2, d: 3}, e: 5}
];
- for (i = 0; i < z.length; ++i)
+ for (let i = 0; i < z.length; ++i)
r.save(z[i]);
- idx = {"a.d": 1, a: 1, e: -1};
- rIdx = {"a.d": -1, a: -1, e: 1};
- r.createIndex(idx);
+ let idx = {"a.d": 1, a: 1, e: -1};
+ let rIdx = {"a.d": -1, a: -1, e: 1};
+ assert.commandWorked(r.createIndex(idx));
checkResults([z[0], z[4], z[2]], r.find({e: 4}).sort(idx).hint(idx));
checkResults([z[1], z[3]], r.find({e: {$gt: 4}, "a.b": 1}).sort(idx).hint(idx));
diff --git a/jstests/core/query/cursor/cursor6.js b/jstests/core/query/cursor/cursor6.js
index dde1f9069cb3a..ad55dad6d2579 100644
--- a/jstests/core/query/cursor/cursor6.js
+++ b/jstests/core/query/cursor/cursor6.js
@@ -13,7 +13,7 @@ function check(indexed) {
hint = {$natural: 1};
}
- f = r.find().sort({a: 1, b: 1}).hint(hint);
+ let f = r.find().sort({a: 1, b: 1}).hint(hint);
eq(z[0], f[0]);
eq(z[1], f[1]);
eq(z[2], f[2]);
@@ -50,11 +50,11 @@ function check(indexed) {
eq(z[0], f[3]);
}
-r = db.ed_db_cursor6;
+let r = db.ed_db_cursor6;
r.drop();
-z = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 2, b: 1}, {a: 2, b: 2}];
-for (i = 0; i < z.length; ++i)
+let z = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 2, b: 1}, {a: 2, b: 2}];
+for (let i = 0; i < z.length; ++i)
r.save(z[i]);
r.createIndex({a: 1, b: -1});
diff --git a/jstests/core/query/cursor/cursor7.js b/jstests/core/query/cursor/cursor7.js
index 4a21c4202740f..9e75ff605fff8 100644
--- a/jstests/core/query/cursor/cursor7.js
+++ b/jstests/core/query/cursor/cursor7.js
@@ -2,21 +2,21 @@
function checkResults(expected, cursor) {
assert.eq(expected.length, cursor.count());
- for (i = 0; i < expected.length; ++i) {
+ for (let i = 0; i < expected.length; ++i) {
assert.eq(expected[i].a, cursor[i].a);
assert.eq(expected[i].b, cursor[i].b);
}
}
function testMultipleInequalities(db) {
- r = db.ed_db_cursor_mi;
+ let r = db.ed_db_cursor_mi;
r.drop();
- z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}];
- for (i = 0; i < z.length; ++i)
+ let z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}];
+ for (let i = 0; i < z.length; ++i)
r.save(z[i]);
- idx = {a: 1, b: 1};
- rIdx = {a: -1, b: -1};
+ let idx = {a: 1, b: 1};
+ let rIdx = {a: -1, b: -1};
r.createIndex(idx);
checkResults([z[2], z[3]], r.find({a: {$gt: 3}}).sort(idx).hint(idx));
diff --git a/jstests/core/query/cursor/tailable_cursor_invalidation.js b/jstests/core/query/cursor/tailable_cursor_invalidation.js
index d59e852d63df2..1cd82554ef564 100644
--- a/jstests/core/query/cursor/tailable_cursor_invalidation.js
+++ b/jstests/core/query/cursor/tailable_cursor_invalidation.js
@@ -6,6 +6,7 @@
// # This test has statements that do not support non-local read concern.
// does_not_support_causal_consistency,
// ]
+load("jstests/libs/fixture_helpers.js");
// Tests for the behavior of tailable cursors when a collection is dropped or the cursor is
// otherwise invalidated.
@@ -26,8 +27,7 @@ const emptyBatchCursorId = assert
.commandWorked(db.runCommand(
{find: collName, tailable: true, awaitData: true, batchSize: 0}))
.cursor.id;
-const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
-if (isMongos) {
+if (FixtureHelpers.isMongos(db)) {
// Mongos will let you establish a cursor with batch size 0 and return to you before it
// realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor id
// though.
diff --git a/jstests/core/query/date/date1.js b/jstests/core/query/date/date1.js
index 65449c662b578..f5e608bbaeb6a 100644
--- a/jstests/core/query/date/date1.js
+++ b/jstests/core/query/date/date1.js
@@ -1,5 +1,5 @@
-t = db.date1;
+let t = db.date1;
function go(d, msg) {
t.drop();
diff --git a/jstests/core/query/date/date2.js b/jstests/core/query/date/date2.js
index a398058d7716e..b1d8c3d8b2ffb 100644
--- a/jstests/core/query/date/date2.js
+++ b/jstests/core/query/date/date2.js
@@ -1,6 +1,6 @@
// Check that it's possible to compare a Date to a Timestamp, but they are never equal - SERVER-3304
-t = db.jstests_date2;
+let t = db.jstests_date2;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/date/date3.js b/jstests/core/query/date/date3.js
index e3eaea620ecf1..06f8af3fce758 100644
--- a/jstests/core/query/date/date3.js
+++ b/jstests/core/query/date/date3.js
@@ -2,12 +2,12 @@
//
// @tags: [requires_fastcount]
-t = db.date3;
+let t = db.date3;
t.drop();
-d1 = new Date(-1000);
-dz = new Date(0);
-d2 = new Date(1000);
+let d1 = new Date(-1000);
+let dz = new Date(0);
+let d2 = new Date(1000);
t.save({x: 3, d: dz});
t.save({x: 2, d: d2});
diff --git a/jstests/core/query/dbref/dbref1.js b/jstests/core/query/dbref/dbref1.js
index b5bb06f230d2d..da88f8d552548 100644
--- a/jstests/core/query/dbref/dbref1.js
+++ b/jstests/core/query/dbref/dbref1.js
@@ -1,6 +1,6 @@
-a = db.dbref1a;
-b = db.dbref1b;
+let a = db.dbref1a;
+let b = db.dbref1b;
a.drop();
b.drop();
diff --git a/jstests/core/query/dbref/dbref2.js b/jstests/core/query/dbref/dbref2.js
index be0deefeb2d9f..4efddccbbac4a 100644
--- a/jstests/core/query/dbref/dbref2.js
+++ b/jstests/core/query/dbref/dbref2.js
@@ -1,6 +1,6 @@
-a = db.dbref2a;
-b = db.dbref2b;
-c = db.dbref2c;
+let a = db.dbref2a;
+let b = db.dbref2b;
+let c = db.dbref2c;
a.drop();
b.drop();
diff --git a/jstests/core/query/dbref/ref.js b/jstests/core/query/dbref/ref.js
index 0d1160482fc50..696b44c7d9174 100644
--- a/jstests/core/query/dbref/ref.js
+++ b/jstests/core/query/dbref/ref.js
@@ -10,7 +10,7 @@ assert.throws(function() {
});
db.things.save({name: "abc"});
-x = db.things.findOne();
+let x = db.things.findOne();
x.o = new DBPointer("otherthings", other._id);
db.things.save(x);
diff --git a/jstests/core/query/dbref/ref2.js b/jstests/core/query/dbref/ref2.js
index 6b284b1f59f2e..a17b084e8b108 100644
--- a/jstests/core/query/dbref/ref2.js
+++ b/jstests/core/query/dbref/ref2.js
@@ -1,16 +1,10 @@
// @tags: [requires_fastcount]
-t = db.ref2;
+let t = db.ref2;
t.drop();
-a = {
- $ref: "foo",
- $id: 1
-};
-b = {
- $ref: "foo",
- $id: 2
-};
+let a = {$ref: "foo", $id: 1};
+let b = {$ref: "foo", $id: 2};
t.save({name: "a", r: a});
t.save({name: "b", r: b});
diff --git a/jstests/core/query/dbref/ref3.js b/jstests/core/query/dbref/ref3.js
index 4406863d89966..f73c7d0fad404 100644
--- a/jstests/core/query/dbref/ref3.js
+++ b/jstests/core/query/dbref/ref3.js
@@ -8,7 +8,7 @@ var other = {s: "other thing", n: 1};
db.otherthings3.save(other);
db.things3.save({name: "abc"});
-x = db.things3.findOne();
+let x = db.things3.findOne();
x.o = new DBRef("otherthings3", other._id);
db.things3.save(x);
diff --git a/jstests/core/query/dbref/ref4.js b/jstests/core/query/dbref/ref4.js
index 882253f38837a..45519b8cac168 100644
--- a/jstests/core/query/dbref/ref4.js
+++ b/jstests/core/query/dbref/ref4.js
@@ -1,6 +1,6 @@
-a = db.ref4a;
-b = db.ref4b;
+let a = db.ref4a;
+let b = db.ref4b;
a.drop();
b.drop();
@@ -11,7 +11,7 @@ b.save(other);
a.save({name: "abc", others: [new DBRef("ref4b", other._id), new DBPointer("ref4b", other._id)]});
assert(a.findOne().others[0].fetch().n == 17, "dbref broken 1");
-x = Array.fetchRefs(a.findOne().others);
+let x = Array.fetchRefs(a.findOne().others);
assert.eq(2, x.length, "A");
assert.eq(17, x[0].n, "B");
assert.eq(17, x[1].n, "C");
diff --git a/jstests/core/query/distinct/distinct2.js b/jstests/core/query/distinct/distinct2.js
index fc6ff7779b75a..9b886e56b433e 100644
--- a/jstests/core/query/distinct/distinct2.js
+++ b/jstests/core/query/distinct/distinct2.js
@@ -1,5 +1,5 @@
-t = db.distinct2;
+let t = db.distinct2;
t.drop();
t.save({a: null});
diff --git a/jstests/core/query/distinct/distinct3.js b/jstests/core/query/distinct/distinct3.js
index c2aaaad79e5a5..eb3a0077d9c38 100644
--- a/jstests/core/query/distinct/distinct3.js
+++ b/jstests/core/query/distinct/distinct3.js
@@ -6,35 +6,35 @@
// Yield and delete test case for query optimizer cursor. SERVER-4401
-t = db.jstests_distinct3;
+let t = db.jstests_distinct3;
t.drop();
t.createIndex({a: 1});
t.createIndex({b: 1});
var bulk = t.initializeUnorderedBulkOp();
-for (i = 0; i < 50; ++i) {
- for (j = 0; j < 2; ++j) {
+for (let i = 0; i < 50; ++i) {
+ for (let j = 0; j < 2; ++j) {
bulk.insert({a: i, c: i, d: j});
}
}
-for (i = 0; i < 100; ++i) {
+for (let i = 0; i < 100; ++i) {
bulk.insert({b: i, c: i + 50});
}
assert.commandWorked(bulk.execute());
// Attempt to remove the last match for the {a:1} index scan while distinct is yielding.
-p = startParallelShell('for( i = 0; i < 100; ++i ) { ' +
- ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' +
- ' bulk.find( { a:49 } ).remove(); ' +
- ' for( j = 0; j < 20; ++j ) { ' +
- ' bulk.insert( { a:49, c:49, d:j } ); ' +
- ' } ' +
- ' assert.commandWorked(bulk.execute()); ' +
- '} ');
+let p = startParallelShell('for( i = 0; i < 100; ++i ) { ' +
+ ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' +
+ ' bulk.find( { a:49 } ).remove(); ' +
+ ' for( j = 0; j < 20; ++j ) { ' +
+ ' bulk.insert( { a:49, c:49, d:j } ); ' +
+ ' } ' +
+ ' assert.commandWorked(bulk.execute()); ' +
+ '} ');
-for (i = 0; i < 100; ++i) {
- count = t.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length;
+for (let i = 0; i < 100; ++i) {
+ let count = t.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length;
assert.gt(count, 100);
}
diff --git a/jstests/core/query/distinct/distinct_array1.js b/jstests/core/query/distinct/distinct_array1.js
index cb82c25bbff14..a812c1c23f0ad 100644
--- a/jstests/core/query/distinct/distinct_array1.js
+++ b/jstests/core/query/distinct/distinct_array1.js
@@ -1,4 +1,4 @@
-t = db.distinct_array1;
+let t = db.distinct_array1;
t.drop();
t.save({a: [1, 2, 3]});
@@ -7,7 +7,7 @@ t.save({a: [3, 4, 5]});
t.save({a: 9});
// Without index.
-res = t.distinct("a").sort();
+let res = t.distinct("a").sort();
assert.eq("1,2,3,4,5,9", res.toString());
// Array element 0 without index.
diff --git a/jstests/core/query/distinct/distinct_compound_index.js b/jstests/core/query/distinct/distinct_compound_index.js
index dfca2d712f35b..d7ec41e39c7f7 100644
--- a/jstests/core/query/distinct/distinct_compound_index.js
+++ b/jstests/core/query/distinct/distinct_compound_index.js
@@ -4,11 +4,12 @@
// # Asserts that some queries use a collection scan.
// assumes_no_implicit_index_creation,
// ]
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/libs/analyze_plan.js"); // For planHasStage.
+import {
+ getWinningPlan,
+ planHasStage,
+ assertStagesForExplainOfCommand
+} from "jstests/libs/analyze_plan.js";
const coll = db.distinct_multikey_index;
@@ -55,10 +56,12 @@ assertStagesForExplainOfCommand({
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.createIndex({a: 1, b: 1, text: "text"}));
-assertStagesForExplainOfCommand({
+// TODO SERVER-76084: build a test similar to this to check that the distinct output contains the
+// prefix according to the expectPrefix value/presence, i.e.
+// if (!expectPrefix) assert.eq(result["queryPlanner"]["namespace"], "test.distinct_multikey_index")
+let result = assertStagesForExplainOfCommand({
coll: coll,
cmdObj: cmdObj,
expectedStages: ["COLLSCAN"],
stagesNotExpected: ["DISTINCT_SCAN"]
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/query/distinct/distinct_hint.js b/jstests/core/query/distinct/distinct_hint.js
new file mode 100644
index 0000000000000..3b8749e60e5fc
--- /dev/null
+++ b/jstests/core/query/distinct/distinct_hint.js
@@ -0,0 +1,70 @@
+/**
+ * This test ensures that hint on the distinct command works.
+ *
+ * @tags: [
+ * assumes_unsharded_collection,
+ * requires_fcv_71,
+ * ]
+ */
+
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
+
+const collName = "jstests_explain_distinct_hint";
+const coll = db[collName];
+
+coll.drop();
+
+// Insert the data to perform distinct() on.
+assert.commandWorked(db.coll.insert({a: 1, b: 2}));
+assert.commandWorked(db.coll.insert({a: 1, b: 2, c: 3}));
+assert.commandWorked(db.coll.insert({a: 2, b: 2, d: 3}));
+assert.commandWorked(db.coll.insert({a: 1, b: 2}));
+assert.commandWorked(db.coll.createIndex({a: 1}));
+assert.commandWorked(db.coll.createIndex({b: 1}));
+assert.commandWorked(db.coll.createIndex({x: 1}, {sparse: true}));
+
+// Use .explain() to make sure the index we specify is being used when we use a hint.
+let explain = db.coll.explain().distinct("a", {a: 1, b: 2});
+assert.eq(getPlanStage(explain, "IXSCAN").indexName, "a_1");
+
+explain = db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: {b: 1}});
+let ixScanStage = getPlanStage(explain, "IXSCAN");
+assert(ixScanStage, tojson(explain));
+assert.eq(ixScanStage.indexName, "b_1", tojson(ixScanStage));
+assert.eq(explain.command.hint, {"b": 1});
+
+explain = db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: "b_1"});
+ixScanStage = getPlanStage(explain, "IXSCAN");
+assert(ixScanStage, tojson(explain));
+assert.eq(ixScanStage.indexName, "b_1");
+assert.eq(explain.command.hint, "b_1");
+
+// Make sure the hint produces the right values when the query is run.
+let cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {a: 1}});
+assert.eq(1, cmdObj.values);
+
+cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: "a_1"});
+assert.eq(1, cmdObj.values);
+
+cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {b: 1}});
+assert.eq(1, cmdObj.values);
+
+cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {x: 1}});
+assert.eq([], cmdObj.values);
+
+cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: "x_1"});
+assert.eq([], cmdObj.values);
+
+assert.throws(function() {
+ db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: {bad: 1, hint: 1}});
+});
+
+assert.throws(function() {
+ db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: "BAD HINT"});
+});
+
+let cmdRes =
+ db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {bad: 1, hint: 1}});
+assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, cmdRes);
+const regex = new RegExp("hint provided does not correspond to an existing index");
+assert(regex.test(cmdRes.errmsg));
diff --git a/jstests/core/query/distinct/distinct_index1.js b/jstests/core/query/distinct/distinct_index1.js
index 75a7f8adc223e..44c99d2087b9c 100644
--- a/jstests/core/query/distinct/distinct_index1.js
+++ b/jstests/core/query/distinct/distinct_index1.js
@@ -5,8 +5,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-load("jstests/libs/analyze_plan.js"); // For getPlanStage.
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.distinct_index1;
coll.drop();
@@ -80,5 +79,4 @@ assert.commandWorked(coll.createIndex({a: "hashed"}));
explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]});
assert.eq(188, explain.executionStats.nReturned);
const indexScanStage = getPlanStage(getWinningPlan(explain.queryPlanner), "IXSCAN");
-assert.eq("hashed", indexScanStage.keyPattern.a);
-})();
+assert.eq("hashed", indexScanStage.keyPattern.a);
\ No newline at end of file
diff --git a/jstests/core/query/distinct/distinct_multikey.js b/jstests/core/query/distinct/distinct_multikey.js
index 21d060bddf2ca..ca98b6e1d32c5 100644
--- a/jstests/core/query/distinct/distinct_multikey.js
+++ b/jstests/core/query/distinct/distinct_multikey.js
@@ -4,10 +4,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
let coll = db.jstest_distinct_multikey;
coll.drop();
@@ -110,5 +107,4 @@ assert.eq([1, 7, 8], result.sort());
explain = coll.explain("queryPlanner").distinct("b.c", {a: 3});
winningPlan = getWinningPlan(explain.queryPlanner);
assert(planHasStage(db, winningPlan, "PROJECTION_DEFAULT"));
-assert(planHasStage(db, winningPlan, "DISTINCT_SCAN"));
-}());
+assert(planHasStage(db, winningPlan, "DISTINCT_SCAN"));
\ No newline at end of file
diff --git a/jstests/core/query/distinct/distinct_multikey_dotted_path.js b/jstests/core/query/distinct/distinct_multikey_dotted_path.js
index c8530fe679997..3bbbfd92a8b5e 100644
--- a/jstests/core/query/distinct/distinct_multikey_dotted_path.js
+++ b/jstests/core/query/distinct/distinct_multikey_dotted_path.js
@@ -11,9 +11,7 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js"); // For planHasStage().
+import {getAggPlanStages, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.distinct_multikey;
coll.drop();
@@ -205,5 +203,4 @@ assert.commandWorked(coll.insert({a: {b: {c: []}}}));
// Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will
// only treat '0' as a field name (not array index).
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/query/distinct/distinct_with_hashed_index.js b/jstests/core/query/distinct/distinct_with_hashed_index.js
index 8476c5c5f14a3..203867652aa66 100644
--- a/jstests/core/query/distinct/distinct_with_hashed_index.js
+++ b/jstests/core/query/distinct/distinct_with_hashed_index.js
@@ -8,9 +8,14 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js"); // For planHasStage().
+import {
+ getAggPlanStage,
+ getWinningPlan,
+ isCollscan,
+ isIndexOnly,
+ isIxscan,
+ planHasStage,
+} from "jstests/libs/analyze_plan.js";
const coll = db.distinct_with_hashed_index;
coll.drop();
@@ -150,5 +155,4 @@ pipeline = [{$group: {_id: "$b"}}];
assert.eq(26, coll.aggregate(pipeline).itcount());
explainPlan = coll.explain().aggregate(pipeline);
assert.eq(null, getAggPlanStage(explainPlan, "DISTINCT_SCAN"), explainPlan);
-assert.neq(null, getAggPlanStage(explainPlan, "COLLSCAN"), explainPlan);
-})();
+assert.neq(null, getAggPlanStage(explainPlan, "COLLSCAN"), explainPlan);
\ No newline at end of file
diff --git a/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js b/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js
new file mode 100644
index 0000000000000..9d5b73b5b3999
--- /dev/null
+++ b/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js
@@ -0,0 +1,69 @@
+/**
+ * Test OR-pushdown fixes for elemMatch based on SERVER-74954.
+ */
+load("jstests/aggregation/extras/utils.js"); // for "arrayEq".
+
+const coll = db.jstests_elemmatch_or_pushdown_paths;
+
+coll.drop();
+
+assert.commandWorked(coll.insert([
+ {a: 1, b: [{c: 1}]},
+ {a: 2, b: [{c: 1}]},
+ {a: 3, b: [{c: 1}]},
+ {a: 4, b: [{c: 1}]},
+]));
+assert.commandWorked(coll.createIndex({"b.c": 1, a: 1}));
+
+// Test exact bounds.
+assert(arrayEq(coll.find({
+ $and: [
+ {$or: [{a: {$lt: 2}}, {a: {$gt: 3}}]},
+ {b: {$elemMatch: {c: {$eq: 1, $exists: true}}}}
+ ]
+ },
+ {_id: 0})
+ .hint({"b.c": 1, a: 1})
+ .toArray(),
+ [
+ {a: 1, b: [{c: 1}]},
+ {a: 4, b: [{c: 1}]},
+ ]));
+
+// Similar test, but use $mod instead of $exists.
+const results = coll.find({
+ $and: [
+ {$or: [{a: {$lt: 2}}, {a: {$gt: 3}}]},
+ {b: {$elemMatch: {c: {$eq: 1, $mod: [2, 1]}}}}
+ ]
+ },
+ {_id: 0})
+ .toArray();
+
+assert(arrayEq(results,
+ [
+ {a: 1, b: [{c: 1}]},
+ {a: 4, b: [{c: 1}]},
+ ]),
+ results);
+
+assert(coll.drop());
+assert.commandWorked(coll.insert([
+ {a: 5, b: [{c: 5, d: 6, e: 7}]},
+ {a: 5, b: [{c: 5, d: 6, e: 8}]},
+ {a: 5, b: [{c: 5, d: 5, e: 7}]},
+ {a: 4, b: [{c: 5, d: 6, e: 7}]},
+]));
+assert.commandWorked(coll.createIndex({"b.d": 1, "b.c": 1}));
+assert.commandWorked(coll.createIndex({"b.e": 1, "b.c": 1}));
+
+// Test $or nested within $elemMatch.
+assert(arrayEq(
+ coll.find({$and: [{a: 5}, {b: {$elemMatch: {$and: [{c: 5}, {$or: [{d: 6}, {e: 7}]}]}}}]},
+ {_id: 0})
+ .toArray(),
+ [
+ {a: 5, b: [{c: 5, d: 6, e: 7}]},
+ {a: 5, b: [{c: 5, d: 6, e: 8}]},
+ {a: 5, b: [{c: 5, d: 5, e: 7}]},
+ ]));
\ No newline at end of file
diff --git a/jstests/core/query/exists/exists.js b/jstests/core/query/exists/exists.js
index b85d80c36a7b6..f29320b002eb4 100644
--- a/jstests/core/query/exists/exists.js
+++ b/jstests/core/query/exists/exists.js
@@ -2,7 +2,7 @@
// requires_fastcount,
// ]
-t = db.jstests_exists;
+let t = db.jstests_exists;
t.drop();
t.save({});
diff --git a/jstests/core/query/exists/exists2.js b/jstests/core/query/exists/exists2.js
index 0764d859c3b51..6d175584eec5a 100644
--- a/jstests/core/query/exists/exists2.js
+++ b/jstests/core/query/exists/exists2.js
@@ -1,4 +1,4 @@
-t = db.exists2;
+let t = db.exists2;
t.drop();
t.save({a: 1, b: 1});
diff --git a/jstests/core/query/exists/exists3.js b/jstests/core/query/exists/exists3.js
index 510d63c37526c..1b5939f07ceb7 100644
--- a/jstests/core/query/exists/exists3.js
+++ b/jstests/core/query/exists/exists3.js
@@ -1,6 +1,6 @@
// Check exists with a non-empty document, based on SERVER-2470 example.
-t = db.jstests_exists3;
+let t = db.jstests_exists3;
t.drop();
t.insert({a: 1, b: 2});
diff --git a/jstests/core/query/exists/exists4.js b/jstests/core/query/exists/exists4.js
index a533ca53e9ca6..4960fe32ff75c 100644
--- a/jstests/core/query/exists/exists4.js
+++ b/jstests/core/query/exists/exists4.js
@@ -1,6 +1,6 @@
// Check various exists cases, based on SERVER-1735 example.
-t = db.jstests_exists4;
+let t = db.jstests_exists4;
t.drop();
t.createIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1});
diff --git a/jstests/core/query/exists/exists5.js b/jstests/core/query/exists/exists5.js
index 2f4b1a9b8de41..112e8243b99ea 100644
--- a/jstests/core/query/exists/exists5.js
+++ b/jstests/core/query/exists/exists5.js
@@ -1,6 +1,6 @@
// Test some $not/$exists cases.
-t = db.jstests_exists5;
+let t = db.jstests_exists5;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/exists/exists6.js b/jstests/core/query/exists/exists6.js
index 736574db8953d..71fd20d3c2669 100644
--- a/jstests/core/query/exists/exists6.js
+++ b/jstests/core/query/exists/exists6.js
@@ -1,6 +1,6 @@
// SERVER-393 Test indexed matching with $exists.
-t = db.jstests_exists6;
+let t = db.jstests_exists6;
t.drop();
t.createIndex({b: 1});
diff --git a/jstests/core/query/exists/exists7.js b/jstests/core/query/exists/exists7.js
index 285559e82f8f7..1f25db5fedf69 100644
--- a/jstests/core/query/exists/exists7.js
+++ b/jstests/core/query/exists/exists7.js
@@ -2,7 +2,7 @@
// Test that non-boolean value types are allowed with $exists spec. SERVER-2322
-t = db.jstests_exists7;
+let t = db.jstests_exists7;
t.drop();
function testIntegerExistsSpec() {
diff --git a/jstests/core/query/exists/exists8.js b/jstests/core/query/exists/exists8.js
index f22a3be44f49e..3c506cd578108 100644
--- a/jstests/core/query/exists/exists8.js
+++ b/jstests/core/query/exists/exists8.js
@@ -2,7 +2,7 @@
// Test $exists with array element field names SERVER-2897
-t = db.jstests_exists8;
+let t = db.jstests_exists8;
t.drop();
t.save({a: [1]});
diff --git a/jstests/core/query/exists/exists9.js b/jstests/core/query/exists/exists9.js
index c187bb3a10193..d822ae95f0af5 100644
--- a/jstests/core/query/exists/exists9.js
+++ b/jstests/core/query/exists/exists9.js
@@ -1,6 +1,6 @@
// SERVER-393 Test exists with various empty array and empty object cases.
-t = db.jstests_exists9;
+let t = db.jstests_exists9;
t.drop();
// Check existence of missing nested field.
diff --git a/jstests/core/query/exists/existsb.js b/jstests/core/query/exists/existsb.js
index 64ee3cf9a889a..b327311a4efda 100644
--- a/jstests/core/query/exists/existsb.js
+++ b/jstests/core/query/exists/existsb.js
@@ -20,7 +20,7 @@
// everything but {} will have an index entry.
// Let's make sure we handle this properly!
-t = db.jstests_existsb;
+let t = db.jstests_existsb;
t.drop();
t.save({});
diff --git a/jstests/core/query/explain/explain1.js b/jstests/core/query/explain/explain1.js
index 2de3e2f89d208..cdbd8751ddae5 100644
--- a/jstests/core/query/explain/explain1.js
+++ b/jstests/core/query/explain/explain1.js
@@ -2,16 +2,14 @@
// assumes_read_concern_local,
// ]
-t = db.explain1;
+let t = db.explain1;
t.drop();
for (var i = 0; i < 100; i++) {
t.save({x: i});
}
-q = {
- x: {$gt: 50}
-};
+let q = {x: {$gt: 50}};
assert.eq(49, t.find(q).count(), "A");
assert.eq(49, t.find(q).itcount(), "B");
diff --git a/jstests/core/query/explain/explain4.js b/jstests/core/query/explain/explain4.js
index e49b188cb65f8..f3e2b460c5e1c 100644
--- a/jstests/core/query/explain/explain4.js
+++ b/jstests/core/query/explain/explain4.js
@@ -3,16 +3,16 @@
// assumes_read_concern_local,
// ]
-t = db.jstests_explain4;
+let t = db.jstests_explain4;
t.drop();
t.createIndex({a: 1});
-for (i = 0; i < 10; ++i) {
+for (let i = 0; i < 10; ++i) {
t.save({a: i, b: 0});
}
-explain = t.find({a: {$gte: 0}, b: 0}).sort({a: 1}).hint({a: 1}).limit(5).explain(true);
+let explain = t.find({a: {$gte: 0}, b: 0}).sort({a: 1}).hint({a: 1}).limit(5).explain(true);
// Five results are expected, matching the limit spec.
assert.eq(5, explain.executionStats.nReturned);
diff --git a/jstests/core/query/explain/explain6.js b/jstests/core/query/explain/explain6.js
index fbb6ecf6d7655..73bfced511673 100644
--- a/jstests/core/query/explain/explain6.js
+++ b/jstests/core/query/explain/explain6.js
@@ -6,7 +6,7 @@
// Basic test which checks the number of documents returned, keys examined, and documents
// examined as reported by explain.
-t = db.jstests_explain6;
+let t = db.jstests_explain6;
t.drop();
t.createIndex({a: 1, b: 1});
@@ -15,7 +15,7 @@ t.createIndex({b: 1, a: 1});
t.save({a: 0, b: 1});
t.save({a: 1, b: 0});
-explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).explain(true);
+let explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).explain(true);
assert.eq(2, explain.executionStats.nReturned);
assert.eq(2, explain.executionStats.totalKeysExamined);
diff --git a/jstests/core/query/explain/explain_agg_write_concern.js b/jstests/core/query/explain/explain_agg_write_concern.js
index 9ff556489fa7f..ec246140abef3 100644
--- a/jstests/core/query/explain/explain_agg_write_concern.js
+++ b/jstests/core/query/explain/explain_agg_write_concern.js
@@ -5,6 +5,7 @@
// assumes_unsharded_collection,
// assumes_write_concern_unchanged,
// does_not_support_stepdowns,
+// references_foreign_collection,
// requires_non_retryable_commands,
// ]
diff --git a/jstests/core/query/explain/explain_batch_size.js b/jstests/core/query/explain/explain_batch_size.js
index d31b54b3f16ff..aec1124f171ba 100644
--- a/jstests/core/query/explain/explain_batch_size.js
+++ b/jstests/core/query/explain/explain_batch_size.js
@@ -8,11 +8,11 @@
// requires_fastcount,
// ]
-t = db.explain_batch_size;
+let t = db.explain_batch_size;
t.drop();
var n = 3;
-for (i = 0; i < n; i++) {
+for (let i = 0; i < n; i++) {
t.save({x: i});
}
diff --git a/jstests/core/query/explain/explain_count.js b/jstests/core/query/explain/explain_count.js
index 8dc0854ab154c..92c960bd9acb5 100644
--- a/jstests/core/query/explain/explain_count.js
+++ b/jstests/core/query/explain/explain_count.js
@@ -2,7 +2,7 @@
//
// @tags: [requires_fastcount]
-load("jstests/libs/analyze_plan.js"); // For assertExplainCount.
+import {assertExplainCount, getPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded.
var collName = "jstests_explain_count";
diff --git a/jstests/core/query/explain/explain_distinct.js b/jstests/core/query/explain/explain_distinct.js
index 19059b8b1bd90..eae3bdd34fe16 100644
--- a/jstests/core/query/explain/explain_distinct.js
+++ b/jstests/core/query/explain/explain_distinct.js
@@ -7,10 +7,7 @@
/**
* This test ensures that explain on the distinct command works.
*/
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan, isCollscan, planHasStage} from "jstests/libs/analyze_plan.js";
const collName = "jstests_explain_distinct";
const coll = db[collName];
@@ -99,5 +96,4 @@ winningPlan = getWinningPlan(explain.queryPlanner);
assert.eq(1, explain.executionStats.nReturned);
assert(!planHasStage(db, winningPlan, "FETCH"));
assert(planHasStage(db, winningPlan, "PROJECTION_COVERED"));
-assert(planHasStage(db, winningPlan, "DISTINCT_SCAN"));
-})();
+assert(planHasStage(db, winningPlan, "DISTINCT_SCAN"));
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_multi_plan_count.js b/jstests/core/query/explain/explain_multi_plan_count.js
index 2ac52019cf36f..066a3b835f3c0 100644
--- a/jstests/core/query/explain/explain_multi_plan_count.js
+++ b/jstests/core/query/explain/explain_multi_plan_count.js
@@ -8,10 +8,14 @@
// assumes_unsharded_collection,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {
+ assertExplainCount,
+ getRejectedPlan,
+ getRejectedPlans,
+ getWinningPlan,
+ isIndexOnly,
+ isIxscan,
+} from "jstests/libs/analyze_plan.js";
const coll = db.explain_multi_plan_count;
coll.drop();
@@ -38,5 +42,4 @@ for (let curRejectedPlan of rejectedPlans) {
isIxscan(db, rejectedPlan);
}
-assert(coll.drop());
-}());
+assert(coll.drop());
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_multikey.js b/jstests/core/query/explain/explain_multikey.js
index 9bea359edb44a..7b34492178081 100644
--- a/jstests/core/query/explain/explain_multikey.js
+++ b/jstests/core/query/explain/explain_multikey.js
@@ -5,10 +5,7 @@
// @tags: [
// assumes_unsharded_collection,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.explain_multikey;
const keyPattern = {
@@ -81,5 +78,4 @@ verifyMultikeyInfoInExplainOutput({
verifyMultikeyInfoInExplainOutput({
commandObj: {distinct: coll.getName(), key: "a"},
stage: "DISTINCT_SCAN",
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_plan_scores.js b/jstests/core/query/explain/explain_plan_scores.js
index 80788bd455856..ae00c00f0bce2 100644
--- a/jstests/core/query/explain/explain_plan_scores.js
+++ b/jstests/core/query/explain/explain_plan_scores.js
@@ -9,10 +9,7 @@
// assumes_against_mongod_not_mongos,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getRejectedPlan, getRejectedPlans, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.explain_plan_scores;
coll.drop();
@@ -51,5 +48,4 @@ assert.commandWorked(coll.createIndex({a: 1, b: 1}));
const explain = coll.find({a: {$gte: 0}}).explain(verbosity);
assert.commandWorked(explain);
checkExplainOutput(explain, verbosity);
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_sample.js b/jstests/core/query/explain/explain_sample.js
index efb6d2b6f8ac6..2b001ec838595 100644
--- a/jstests/core/query/explain/explain_sample.js
+++ b/jstests/core/query/explain/explain_sample.js
@@ -4,10 +4,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.explain_sample;
coll.drop();
@@ -32,5 +29,4 @@ assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0),
tojson(multiIteratorStages));
assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0),
0,
- tojson(multiIteratorStages));
-}());
+ tojson(multiIteratorStages));
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_shell_helpers.js b/jstests/core/query/explain/explain_shell_helpers.js
index 67b13b0b858d9..1223bffd22191 100644
--- a/jstests/core/query/explain/explain_shell_helpers.js
+++ b/jstests/core/query/explain/explain_shell_helpers.js
@@ -12,13 +12,12 @@
*/
// Tests for the .explain() shell helper, which provides syntactic sugar for the explain command.
+// Include helpers for analyzing explain output.
+import {getPlanStage, getWinningPlan, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js";
var t = db.jstests_explain_helpers;
t.drop();
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
var explain;
var stage;
diff --git a/jstests/core/query/explain/explain_sort_type.js b/jstests/core/query/explain/explain_sort_type.js
index df4d91c0bbf9c..df1ba7b1248e1 100644
--- a/jstests/core/query/explain/explain_sort_type.js
+++ b/jstests/core/query/explain/explain_sort_type.js
@@ -10,10 +10,7 @@
* requires_non_retryable_writes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.explain_sort_type;
coll.drop();
@@ -99,5 +96,4 @@ explain =
winningPlan = getWinningPlan(explain.queryPlanner);
sortStage = getPlanStage(winningPlan, "SORT");
assert.neq(null, sortStage, explain);
-assert.eq("default", sortStage.type, explain);
-}());
+assert.eq("default", sortStage.type, explain);
\ No newline at end of file
diff --git a/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js b/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js
index 2496f4b63a092..79431aa20727c 100644
--- a/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js
+++ b/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js
@@ -2,10 +2,7 @@
//
// @tags: [requires_non_retryable_writes, requires_fastcount,
// assumes_no_implicit_collection_creation_after_drop]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {planHasStage} from "jstests/libs/analyze_plan.js";
function assertCollectionDoesNotExist(collName) {
const collectionList = db.getCollectionInfos({name: collName});
@@ -36,5 +33,4 @@ explain = assert.commandWorked(db.runCommand(
{explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}}));
assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain);
assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain);
-assertCollectionDoesNotExist(collName);
-}());
+assertCollectionDoesNotExist(collName);
\ No newline at end of file
diff --git a/jstests/core/query/explode_for_sort_equality_to_array.js b/jstests/core/query/explode_for_sort_equality_to_array.js
index 9f976f89ef88f..94d5644b37107 100644
--- a/jstests/core/query/explode_for_sort_equality_to_array.js
+++ b/jstests/core/query/explode_for_sort_equality_to_array.js
@@ -1,31 +1,27 @@
-/**
- * Test that a query eligible for the "explode for sort" optimization works correctly when the query
- * involves an equality-to-array predicate. Specifically, we use an `$all` where the constants
- * inside the `$all` list are singleton arrays rather than scalars.
- *
- * This test was originally designed to reproduce SERVER-75304.
- *
- * @tags: [
- * # explain does not support majority read concern
- * assumes_read_concern_local,
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-
-const testDB = db.getSiblingDB(jsTestName());
-assert.commandWorked(testDB.dropDatabase());
-const coll = testDB.explode_for_sort_equality_to_array;
-
-assert.commandWorked(coll.createIndex({array: -1, num: 1}));
-assert.commandWorked(coll.insert({array: [[1], [2]]}));
-assert.commandWorked(coll.insert({array: [[1]]}));
-assert.commandWorked(coll.insert({array: [[2]]}));
-const explain = assert.commandWorked(
- coll.find({array: {$all: [[1], [2]]}}).sort({num: 1}).explain('executionStats'));
-assert.gt(
- getPlanStages(getWinningPlan(explain.queryPlanner), "SORT_MERGE").length, 0, tojson(explain));
-assert.eq(1, explain.executionStats.nReturned, tojson(explain));
-}());
+/**
+ * Test that a query eligible for the "explode for sort" optimization works correctly when the query
+ * involves an equality-to-array predicate. Specifically, we use an `$all` where the constants
+ * inside the `$all` list are singleton arrays rather than scalars.
+ *
+ * This test was originally designed to reproduce SERVER-75304.
+ *
+ * @tags: [
+ * # explain does not support majority read concern
+ * assumes_read_concern_local,
+ * ]
+ */
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+
+const testDB = db.getSiblingDB(jsTestName());
+assert.commandWorked(testDB.dropDatabase());
+const coll = testDB.explode_for_sort_equality_to_array;
+
+assert.commandWorked(coll.createIndex({array: -1, num: 1}));
+assert.commandWorked(coll.insert({array: [[1], [2]]}));
+assert.commandWorked(coll.insert({array: [[1]]}));
+assert.commandWorked(coll.insert({array: [[2]]}));
+const explain = assert.commandWorked(
+ coll.find({array: {$all: [[1], [2]]}}).sort({num: 1}).explain('executionStats'));
+assert.gt(
+ getPlanStages(getWinningPlan(explain.queryPlanner), "SORT_MERGE").length, 0, tojson(explain));
+assert.eq(1, explain.executionStats.nReturned, tojson(explain));
\ No newline at end of file
diff --git a/jstests/core/query/explode_for_sort_plan_cache.js b/jstests/core/query/explode_for_sort_plan_cache.js
index 53d343b35d8ff..b630610a93066 100644
--- a/jstests/core/query/explode_for_sort_plan_cache.js
+++ b/jstests/core/query/explode_for_sort_plan_cache.js
@@ -22,11 +22,12 @@
* does_not_support_repeated_reads,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js");
+import {
+ getPlanCacheKeyFromShape,
+ getPlanStages,
+ getWinningPlan
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSBEEnabled = checkSBEEnabled(db);
const coll = db.explode_for_sort_plan_cache;
@@ -216,4 +217,3 @@ assertExplodeForSortCacheParameterizedCorrectly({
newQueryCount: 0,
reuseEntry: false,
});
-}());
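
Editor's note: several of these tests now import checkSBEEnabled by name instead of loading sbe_util.js. A minimal sketch of how a test typically branches on it — the expectation values below are illustrative only, not taken from this file:

import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

const isSBEEnabled = checkSBEEnabled(db);
// Hypothetical branch: plan-cache behaviour differs between the classic engine
// and SBE, so tests usually pick their expected counts based on the flag.
const expectedNewEntries = isSBEEnabled ? 1 : 0;  // illustrative values only
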
diff --git a/jstests/core/query/expr/expr.js b/jstests/core/query/expr/expr.js
index 737cab39f156b..81ebf13a8d3a6 100644
--- a/jstests/core/query/expr/expr.js
+++ b/jstests/core/query/expr/expr.js
@@ -15,13 +15,10 @@
"use strict";
load("jstests/libs/sbe_assert_error_override.js"); // For 'assert.errorCodeEq'.
+load("jstests/libs/fixture_helpers.js");
const coll = db.expr;
-const hello = db.runCommand("hello");
-assert.commandWorked(hello);
-const isMongos = (hello.msg === "isdbgrid");
-
//
// $expr in aggregate.
//
@@ -126,7 +123,7 @@ assert.throws(function() {
// 'executionSuccess' field.
let explain = coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats");
// Accommodate format differences between explain via mongos and explain directly on a mongod.
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
assert(explain.hasOwnProperty("executionStats"), explain);
assert.eq(explain.executionStats.executionSuccess, false, explain);
assert.errorCodeEq(explain.executionStats.errorCode, [16609, ErrorCodes.TypeMismatch], explain);
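
Editor's note: the hand-rolled topology check (running "hello" and comparing msg against "isdbgrid") is replaced here by the shared fixture helper. A minimal sketch of the two styles, with the old one kept as a comment for contrast:

load("jstests/libs/fixture_helpers.js");

// Old style: detect mongos by hand.
// const hello = assert.commandWorked(db.runCommand("hello"));
// const isMongos = (hello.msg === "isdbgrid");

// New style: let the shared fixture helper answer the same question.
if (!FixtureHelpers.isMongos(db)) {
    // Assertions that only hold when explain runs directly against a mongod.
}
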
diff --git a/jstests/core/query/expr/expr_index_use.js b/jstests/core/query/expr/expr_index_use.js
index f7a04aed48237..2a2f096b0d4f6 100644
--- a/jstests/core/query/expr/expr_index_use.js
+++ b/jstests/core/query/expr/expr_index_use.js
@@ -4,11 +4,8 @@
// requires_fcv_63,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage, getPlanStage, hasRejectedPlans} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.expr_index_use;
coll.drop();
@@ -330,4 +327,3 @@ confirmExpectedExprExecution({$lt: ["$w", {z: undefined, u: ["array"]}]},
{nReturned: 6, expectedIndex: {w: 1}});
confirmExpectedExprExecution({$lte: ["$w", {z: undefined, u: ["array"]}]},
{nReturned: 7, expectedIndex: {w: 1}});
-})();
diff --git a/jstests/core/query/expr/expr_valid_positions.js b/jstests/core/query/expr/expr_valid_positions.js
index cd3ae2bf91753..83529eb55726a 100644
--- a/jstests/core/query/expr/expr_valid_positions.js
+++ b/jstests/core/query/expr/expr_valid_positions.js
@@ -20,4 +20,4 @@ assert.throws(function() {
assert.throws(function() {
coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount();
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/query/find/find2.js b/jstests/core/query/find/find2.js
index a793c60b804f1..747775bd4b0cd 100644
--- a/jstests/core/query/find/find2.js
+++ b/jstests/core/query/find/find2.js
@@ -3,13 +3,13 @@
// @tags: [requires_fastcount]
function testObjectIdFind(db) {
- r = db.ed_db_find2_oif;
+ let r = db.ed_db_find2_oif;
r.drop();
- for (i = 0; i < 3; ++i)
+ for (let i = 0; i < 3; ++i)
r.save({});
- f = r.find().sort({_id: 1});
+ let f = r.find().sort({_id: 1});
assert.eq(3, f.count());
assert(f[0]._id < f[1]._id);
assert(f[1]._id < f[2]._id);
diff --git a/jstests/core/query/find/find6.js b/jstests/core/query/find/find6.js
index d76cc1d5fb515..4b90cfe8cf753 100644
--- a/jstests/core/query/find/find6.js
+++ b/jstests/core/query/find/find6.js
@@ -4,7 +4,7 @@
// requires_scripting,
// ]
-t = db.find6;
+let t = db.find6;
t.drop();
t.save({a: 1});
@@ -16,7 +16,7 @@ assert.eq(1, t.find("function() { return this.b == null; }").itcount(), "C");
assert.eq(1, t.find("function() { return this.b == null; }").count(), "D");
/* test some stuff with dot array notation */
-q = db.find6a;
+let q = db.find6a;
q.drop();
q.insert({"a": [{"0": 1}]});
q.insert({"a": [{"0": 2}]});
diff --git a/jstests/core/query/find/find7.js b/jstests/core/query/find/find7.js
index ed18dcbb0ff0a..56fbd859299ba 100644
--- a/jstests/core/query/find/find7.js
+++ b/jstests/core/query/find/find7.js
@@ -1,10 +1,7 @@
-t = db.find7;
+let t = db.find7;
t.drop();
-x = {
- "_id": {"d": 3649, "w": "signed"},
- "u": {"3649": 5}
-};
+let x = {"_id": {"d": 3649, "w": "signed"}, "u": {"3649": 5}};
t.insert(x);
assert.eq(x, t.findOne(), "A1");
assert.eq(x, t.findOne({_id: x._id}), "A2");
diff --git a/jstests/core/query/find/find8.js b/jstests/core/query/find/find8.js
index 14930a056e72c..a64f89496f5d3 100644
--- a/jstests/core/query/find/find8.js
+++ b/jstests/core/query/find/find8.js
@@ -1,6 +1,6 @@
// SERVER-1932 Test unindexed matching of a range that is only valid in a multikey context.
-t = db.jstests_find8;
+let t = db.jstests_find8;
t.drop();
t.save({a: [1, 10]});
diff --git a/jstests/core/query/find/find9.js b/jstests/core/query/find/find9.js
index be6bfdb2ccf77..e4858e094905d 100644
--- a/jstests/core/query/find/find9.js
+++ b/jstests/core/query/find/find9.js
@@ -2,11 +2,11 @@
// Test that the MaxBytesToReturnToClientAtOnce limit is enforced.
-t = db.jstests_find9;
+let t = db.jstests_find9;
t.drop();
-big = new Array(500000).toString();
-for (i = 0; i < 60; ++i) {
+let big = new Array(500000).toString();
+for (let i = 0; i < 60; ++i) {
t.save({a: i, b: big});
}
@@ -18,12 +18,12 @@ assert.gt(60, t.find().objsLeftInBatch());
assert.eq(60, t.find({}, {a: 1}).batchSize(80).objsLeftInBatch());
assert.gt(60, t.find().batchSize(80).objsLeftInBatch());
-for (i = 0; i < 60; ++i) {
+for (let i = 0; i < 60; ++i) {
t.save({a: i, b: big});
}
// Check size limit with get more.
-c = t.find().batchSize(80);
+let c = t.find().batchSize(80);
while (c.hasNext()) {
assert.gt(60, c.objsLeftInBatch());
c.next();
diff --git a/jstests/core/query/find/find_project_sort.js b/jstests/core/query/find/find_project_sort.js
index 3d359fd9e41cb..16fb1193da38d 100644
--- a/jstests/core/query/find/find_project_sort.js
+++ b/jstests/core/query/find/find_project_sort.js
@@ -33,9 +33,6 @@ const documents = [
];
assert.commandWorked(coll.insert(documents));
-assert.commandWorked(coll.createIndex({a: 1}));
-assert.commandWorked(coll.createIndex({z: 1}));
-
function checkQuery(
{expected = [], query = {}, proj = {}, sort = null, limit = null, skip = null, desc = null},
hint) {
@@ -803,6 +800,10 @@ runIDHackTest();
runCollScanTests();
runFindTestsWithHint({$natural: 1});
+
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({z: 1}));
+
runFindTestsWithHint({a: 1});
runFindTestsWithHint({z: 1}); // Multi-key
}());
diff --git a/jstests/core/query/find/finda.js b/jstests/core/query/find/finda.js
index 2e1a93cbad8de..2a3799f72bf2d 100644
--- a/jstests/core/query/find/finda.js
+++ b/jstests/core/query/find/finda.js
@@ -4,10 +4,10 @@
// Tests where the QueryOptimizerCursor enters takeover mode during a query rather than a get more.
-t = db.jstests_finda;
+let t = db.jstests_finda;
t.drop();
-numDocs = 200;
+let numDocs = 200;
function clearQueryPlanCache() {
t.createIndex({c: 1});
@@ -16,12 +16,12 @@ function clearQueryPlanCache() {
function assertAllFound(matches) {
// printjson( matches );
- found = new Array(numDocs);
+ let found = new Array(numDocs);
for (var i = 0; i < numDocs; ++i) {
found[i] = false;
}
for (var i in matches) {
- m = matches[i];
+ let m = matches[i];
found[m._id] = true;
}
for (var i = 0; i < numDocs; ++i) {
@@ -34,7 +34,7 @@ function makeCursor(query, projection, sort, batchSize, returnKey) {
printjson(query);
print("proj:");
printjson(projection);
- cursor = t.find(query, projection);
+ let cursor = t.find(query, projection);
if (sort) {
cursor.sort(sort);
print("sort:");
@@ -53,7 +53,7 @@ function makeCursor(query, projection, sort, batchSize, returnKey) {
function checkCursorWithBatchSizeProjection(
query, projection, sort, batchSize, expectedLeftInBatch) {
clearQueryPlanCache();
- cursor = makeCursor(query, projection, sort, batchSize);
+ let cursor = makeCursor(query, projection, sort, batchSize);
if (TestData.batchSize && batchSize == null) {
expectedLeftInBatch = Math.min(TestData.batchSize, expectedLeftInBatch);
}
@@ -71,10 +71,10 @@ function checkCursorWithBatchSize(query, sort, batchSize, expectedLeftInBatch) {
// from the a,_id index.
clearQueryPlanCache();
if (expectedLeftInBatch > 110) {
- cursor = makeCursor(query, {}, sort, batchSize, true);
- lastNonAIndexResult = -1;
+ let cursor = makeCursor(query, {}, sort, batchSize, true);
+ let lastNonAIndexResult = -1;
for (var i = 0; i < expectedLeftInBatch; ++i) {
- next = cursor.next();
+ let next = cursor.next();
// Identify the query plan used by checking the fields of a returnKey query.
if (!friendlyEqual(['a', '_id'], Object.keySet(next))) {
lastNonAIndexResult = i;
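
Editor's note: many of the hunks above and below only add `let` in front of what used to be implicit global assignments. That is what allows these files to run under strict mode and, eventually, as ES modules. A minimal sketch of the failure mode being fixed (variable and collection names are illustrative):

"use strict";

// cursor = db.sketch_coll.find();    // implicit global: ReferenceError under strict mode
let cursor = db.sketch_coll.find();   // block-scoped, strict-mode safe

for (let i = 0; i < 3; ++i) {
    // 'i' no longer leaks out of the loop or collides with other loops in the file.
}
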
diff --git a/jstests/core/query/idhack.js b/jstests/core/query/idhack.js
index 1ddab70d4cd2a..0ab6622ebd67e 100644
--- a/jstests/core/query/idhack.js
+++ b/jstests/core/query/idhack.js
@@ -3,16 +3,13 @@
// requires_multi_updates,
// requires_non_retryable_writes,
// ]
-(function() {
-"use strict";
+// Include helpers for analyzing explain output.
+import {getWinningPlan, isIdhack} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const t = db.idhack;
t.drop();
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-
assert.commandWorked(t.insert({_id: {x: 1}, z: 1}));
assert.commandWorked(t.insert({_id: {x: 2}, z: 2}));
assert.commandWorked(t.insert({_id: {x: 3}, z: 3}));
@@ -114,4 +111,3 @@ assert.eq(0, t.find({_id: 1}).hint({_id: 1}).min({_id: 2}).itcount());
explain = t.find({_id: 2}).hint({_id: 1}).min({_id: 1}).max({_id: 3}).explain();
winningPlan = getWinningPlan(explain.queryPlanner);
assert(!isIdhack(db, winningPlan), winningPlan);
-})();
diff --git a/jstests/core/query/in/in.js b/jstests/core/query/in/in.js
index ca8cd77b1895e..f2a3ab6fcff3c 100644
--- a/jstests/core/query/in/in.js
+++ b/jstests/core/query/in/in.js
@@ -1,5 +1,5 @@
-t = db.in1;
+let t = db.in1;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/in/in2.js b/jstests/core/query/in/in2.js
index 6682bbc79c187..48f94f6aed64f 100644
--- a/jstests/core/query/in/in2.js
+++ b/jstests/core/query/in/in2.js
@@ -1,5 +1,5 @@
-t = db.in2;
+let t = db.in2;
function go(name, index) {
t.drop();
diff --git a/jstests/core/query/in/in3.js b/jstests/core/query/in/in3.js
index bd64329ded498..5bc2b353b45e1 100644
--- a/jstests/core/query/in/in3.js
+++ b/jstests/core/query/in/in3.js
@@ -1,6 +1,6 @@
// SERVER-2829 Test arrays matching themselves within a $in expression.
-t = db.jstests_in8;
+let t = db.jstests_in8;
t.drop();
t.save({key: [1]});
diff --git a/jstests/core/query/in/in4.js b/jstests/core/query/in/in4.js
index e916ca7c82d93..8d18a8b4287af 100644
--- a/jstests/core/query/in/in4.js
+++ b/jstests/core/query/in/in4.js
@@ -2,7 +2,7 @@
// SERVER-2343 Test $in empty array matching.
-t = db.jstests_in9;
+let t = db.jstests_in9;
t.drop();
function someData() {
diff --git a/jstests/core/query/in/in5.js b/jstests/core/query/in/in5.js
index 80f37e6b473f7..5a8e63d188881 100644
--- a/jstests/core/query/in/in5.js
+++ b/jstests/core/query/in/in5.js
@@ -4,28 +4,28 @@
// assumes_no_implicit_index_creation,
// ]
-t = db.in5;
+let t = db.in5;
function go(fn) {
t.drop();
- o = {};
+ let o = {};
o[fn] = {a: 1, b: 2};
t.insert(o);
- x = {};
+ let x = {};
x[fn] = {a: 1, b: 2};
assert.eq(1, t.find(x).itcount(), "A1 - " + fn);
- y = {};
+ let y = {};
y[fn] = {$in: [{a: 1, b: 2}]};
assert.eq(1, t.find(y).itcount(), "A2 - " + fn);
- z = {};
+ let z = {};
z[fn + ".a"] = 1;
z[fn + ".b"] = {$in: [2]};
assert.eq(1, t.find(z).itcount(), "A3 - " + fn); // SERVER-1366
- i = {};
+ let i = {};
i[fn] = 1;
t.createIndex(i);
diff --git a/jstests/core/query/in/in6.js b/jstests/core/query/in/in6.js
index ab8322cfe0a00..8584e1ee95435 100644
--- a/jstests/core/query/in/in6.js
+++ b/jstests/core/query/in/in6.js
@@ -1,4 +1,4 @@
-t = db.jstests_in6;
+let t = db.jstests_in6;
t.drop();
t.save({});
diff --git a/jstests/core/query/in/in7.js b/jstests/core/query/in/in7.js
index 2f6c9e3ff1aa7..ee2de08c0f48f 100644
--- a/jstests/core/query/in/in7.js
+++ b/jstests/core/query/in/in7.js
@@ -1,6 +1,6 @@
// Uassert when $elemMatch is attempted within $in SERVER-3545
-t = db.jstests_ina;
+let t = db.jstests_ina;
t.drop();
t.save({});
@@ -20,4 +20,4 @@ assert.throws(function() {
// NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally
// we assume that the first key is $elemMatch if any key is, and validating
-// every key is expensive in some cases.
\ No newline at end of file
+// every key is expensive in some cases.
diff --git a/jstests/core/query/in/in8.js b/jstests/core/query/in/in8.js
index f1b6188347c8a..42a99407ce0a3 100644
--- a/jstests/core/query/in/in8.js
+++ b/jstests/core/query/in/in8.js
@@ -1,6 +1,6 @@
// Test $in regular expressions with overlapping index bounds. SERVER-4677
-t = db.jstests_inb;
+let t = db.jstests_inb;
t.drop();
function checkResults(query) {
diff --git a/jstests/core/query/inc/inc1.js b/jstests/core/query/inc/inc1.js
index 551d15e0f63b5..2470b146d70f7 100644
--- a/jstests/core/query/inc/inc1.js
+++ b/jstests/core/query/inc/inc1.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.inc1;
+let t = db.inc1;
t.drop();
function test(num, name) {
diff --git a/jstests/core/query/inc/inc2.js b/jstests/core/query/inc/inc2.js
index debdfde82cb0a..1d253723b2896 100644
--- a/jstests/core/query/inc/inc2.js
+++ b/jstests/core/query/inc/inc2.js
@@ -1,5 +1,5 @@
-t = db.inc2;
+let t = db.inc2;
t.drop();
t.save({_id: 1, x: 1});
diff --git a/jstests/core/query/inc/inc3.js b/jstests/core/query/inc/inc3.js
index e24165876c697..51a9c9c1587ee 100644
--- a/jstests/core/query/inc/inc3.js
+++ b/jstests/core/query/inc/inc3.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.inc3;
+let t = db.inc3;
t.drop();
t.save({_id: 1, z: 1, a: 1});
diff --git a/jstests/core/query/index_key_expression.js b/jstests/core/query/index_key_expression.js
index fcdfe066c281d..6b255722910a2 100644
--- a/jstests/core/query/index_key_expression.js
+++ b/jstests/core/query/index_key_expression.js
@@ -6,10 +6,7 @@
* requires_fcv_63,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil"
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const collection = db.index_key_expression;
@@ -1094,5 +1091,4 @@ testScenarios.forEach(testScenario => {
assert.throwsWithCode(() => collection.aggregate(pipeline).toArray(),
testScenario.expectedErrorCode);
}
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/query/internal_hash_eq/expr_rewrites.js b/jstests/core/query/internal_hash_eq/expr_rewrites.js
index b95284dc9c67d..e71de20f3d914 100644
--- a/jstests/core/query/internal_hash_eq/expr_rewrites.js
+++ b/jstests/core/query/internal_hash_eq/expr_rewrites.js
@@ -2,12 +2,13 @@
* Tests that $expr with equality of $toHashedIndexKey to a NumberLong results in an IXSCAN plan
* with a point bound. This is because we rewrite this structure to a $_internalEqHash expression
* and generate a tight index bound.
- * @tags: [requires_fcv_70]
+ * @tags: [
+ * # explain doesn't support read concern
+ * assumes_read_concern_unchanged,
+ * requires_fcv_70,
+ * ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getExecutionStages, getPlanStages, isIxscan} from "jstests/libs/analyze_plan.js";
const collName = jsTestName();
const coll = db.getCollection(collName);
@@ -165,5 +166,4 @@ function assertExplainIxscan(explainPlan, expectedIndexSpec, expectedKeysExamine
// We couldn't create a tight bound for the index scan as the index is not hashed.
assertExplainIxscan(explainPlan, indexSpec, 3 /* keyExamined */);
-})();
})();
\ No newline at end of file
diff --git a/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js b/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js
index 8e46265133790..9343e5f0e564b 100644
--- a/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js
+++ b/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js
@@ -7,12 +7,9 @@
* @tags: [
* does_not_support_transactions,
* requires_fcv_70,
+ * references_foreign_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For 'isCollscan()' and similar.
load("jstests/aggregation/extras/utils.js"); // For 'resultsEq().'
const coll = db.lookup_using_hash_key;
@@ -45,5 +42,4 @@ let results = coll.aggregate(
// We essentially just looked up ourselves for each document.
let expected = allDocs.map(doc => Object.merge(doc, {relookup: [doc]}));
-assert(resultsEq(results, expected, true), [results, expected]);
-}());
+assert(resultsEq(results, expected, true), [results, expected]);
\ No newline at end of file
diff --git a/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js b/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js
index d5135f57587dc..2795c8973a874 100644
--- a/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js
+++ b/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js
@@ -1,11 +1,13 @@
/**
* Basic tests for the $_internalEqHash match expression.
- * @tags: [requires_fcv_70]
+ * @tags: [
+ * # explain doesn't support read concern
+ * assumes_read_concern_unchanged,
+ * requires_fcv_70,
+ * ]
*/
-(function() {
-"use strict";
+import {isCollscan, isIxscan} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js"); // For 'isCollscan()' and similar.
load("jstests/aggregation/extras/utils.js"); // For 'resultsEq().'
const coll = db.match_internal_eq_hash;
@@ -144,5 +146,4 @@ const coll = db.match_internal_eq_hash;
assert.commandFailedWithCode(
db.runCommand({find: "match_internal_eq_hash", filter: {a: {$_internalEqHash: v}}}), 2);
});
-})();
-}());
+})();
\ No newline at end of file
diff --git a/jstests/core/query/introspect_hidden_index_plan_cache_entries.js b/jstests/core/query/introspect_hidden_index_plan_cache_entries.js
index 1d56012bcbff9..6d5afe613ecde 100644
--- a/jstests/core/query/introspect_hidden_index_plan_cache_entries.js
+++ b/jstests/core/query/introspect_hidden_index_plan_cache_entries.js
@@ -13,9 +13,7 @@
* ]
*/
-(function() {
-'use strict';
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
const collName = 'introspect_hidden_index_plan_cache_entries';
const collNotAffectedName = 'introspect_hidden_index_plan_cache_entries_unaffected';
@@ -97,5 +95,4 @@ assert.gt(cachedPlan.length, 0);
// Unhide an index.
assert.commandWorked(coll.unhideIndex("b_1"));
cachedPlan = getPlansForCacheEntry(queryShape, coll);
-assert.eq(0, cachedPlan.length);
-})();
+assert.eq(0, cachedPlan.length);
\ No newline at end of file
diff --git a/jstests/core/query/js/js1.js b/jstests/core/query/js/js1.js
index f569606ba5417..0a9254c015419 100644
--- a/jstests/core/query/js/js1.js
+++ b/jstests/core/query/js/js1.js
@@ -4,7 +4,7 @@
// requires_scripting,
// ]
-t = db.jstests_js1;
+let t = db.jstests_js1;
t.remove({});
t.save({z: 1});
diff --git a/jstests/core/query/js/js2.js b/jstests/core/query/js/js2.js
index a278a520a8841..05daee8394004 100644
--- a/jstests/core/query/js/js2.js
+++ b/jstests/core/query/js/js2.js
@@ -2,10 +2,10 @@
// requires_non_retryable_writes
//]
-t = db.jstests_js2;
+let t = db.jstests_js2;
t.remove({});
-t2 = db.jstests_js2_2;
+let t2 = db.jstests_js2_2;
t2.remove({});
assert.eq(0, t2.find().length(), "A");
diff --git a/jstests/core/query/js/js3.js b/jstests/core/query/js/js3.js
index c9239bb417685..cb2b03e778789 100644
--- a/jstests/core/query/js/js3.js
+++ b/jstests/core/query/js/js3.js
@@ -7,13 +7,13 @@
// requires_scripting,
// ]
-t = db.jstests_js3;
+let t = db.jstests_js3;
-debug = function(s) {
+let debug = function(s) {
// printjson( s );
};
-for (z = 0; z < 2; z++) {
+for (let z = 0; z < 2; z++) {
debug(z);
t.drop();
@@ -23,7 +23,7 @@ for (z = 0; z < 2; z++) {
t.createIndex({i: 1});
}
- for (i = 0; i < 1000; i++)
+ for (let i = 0; i < 1000; i++)
t.save({
i: i,
z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
@@ -57,17 +57,17 @@ for (z = 0; z < 2; z++) {
debug("before indexed find");
- arr = t.find({
- $where: function() {
- return obj.i == 7 || obj.i == 8;
- }
- }).toArray();
+ let arr = t.find({
+ $where: function() {
+ return obj.i == 7 || obj.i == 8;
+ }
+ }).toArray();
debug(arr);
assert.eq(2, arr.length);
debug("after indexed find");
- for (i = 1000; i < 2000; i++)
+ for (let i = 1000; i < 2000; i++)
t.save({
i: i,
z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
diff --git a/jstests/core/query/js/js4.js b/jstests/core/query/js/js4.js
index a8beab4a8f6f9..11e67b2cd2cb6 100644
--- a/jstests/core/query/js/js4.js
+++ b/jstests/core/query/js/js4.js
@@ -3,17 +3,10 @@
// requires_scripting,
// ]
-t = db.jstests_js4;
+let t = db.jstests_js4;
t.drop();
-real = {
- a: 1,
- b: "abc",
- c: /abc/i,
- d: new Date(111911100111),
- e: null,
- f: true
-};
+let real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true};
t.save(real);
diff --git a/jstests/core/query/js/js5.js b/jstests/core/query/js/js5.js
index 05071a2b6a69b..f41a4de78d4aa 100644
--- a/jstests/core/query/js/js5.js
+++ b/jstests/core/query/js/js5.js
@@ -3,7 +3,7 @@
// requires_scripting
// ]
-t = db.jstests_js5;
+let t = db.jstests_js5;
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/js/js8.js b/jstests/core/query/js/js8.js
index 4bdf942e2c671..1954464923670 100644
--- a/jstests/core/query/js/js8.js
+++ b/jstests/core/query/js/js8.js
@@ -3,7 +3,7 @@
// requires_scripting,
// ]
-t = db.jstests_js8;
+let t = db.jstests_js8;
t.drop();
t.save({a: 1, b: [2, 3, 4]});
diff --git a/jstests/core/query/js/js_jit.js b/jstests/core/query/js/js_jit.js
index 72290d457589d..110d07c03128c 100644
--- a/jstests/core/query/js/js_jit.js
+++ b/jstests/core/query/js/js_jit.js
@@ -37,4 +37,4 @@ function testDBQuery() {
testDBCollection();
testDB();
testDBQuery();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/query/mr/mr_agg_explain.js b/jstests/core/query/mr/mr_agg_explain.js
index 801c2793b61be..cc224f439464f 100644
--- a/jstests/core/query/mr/mr_agg_explain.js
+++ b/jstests/core/query/mr/mr_agg_explain.js
@@ -8,10 +8,7 @@
* requires_scripting,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.mr_explain;
coll.drop();
@@ -61,5 +58,4 @@ runTest("out_collection");
runTest({out: {inline: 1}});
// Explain on mapReduce fails when the 3rd 'optionsOrOutString' argument is missing.
-assert.throws(() => coll.explain().mapReduce(mapFunc, reduceFunc));
-}());
+assert.throws(() => coll.explain().mapReduce(mapFunc, reduceFunc));
\ No newline at end of file
diff --git a/jstests/core/query/mr/mr_bigobject.js b/jstests/core/query/mr/mr_bigobject.js
index 83089935f3550..57d8283af235e 100644
--- a/jstests/core/query/mr/mr_bigobject.js
+++ b/jstests/core/query/mr/mr_bigobject.js
@@ -37,7 +37,7 @@ assert.eq([{_id: 1, value: 1}], outputColl.find().toArray());
// The reduce function processes the expected amount of data.
reduceFn = function(k, v) {
- total = 0;
+ let total = 0;
for (let i = 0; i < v.length; i++) {
const x = v[i];
if (typeof (x) == "number")
diff --git a/jstests/core/query/mr/mr_bigobject_replace.js b/jstests/core/query/mr/mr_bigobject_replace.js
index db7d0bc3712cf..ba534bed0961b 100644
--- a/jstests/core/query/mr/mr_bigobject_replace.js
+++ b/jstests/core/query/mr/mr_bigobject_replace.js
@@ -59,13 +59,19 @@ function runTest(testOptions) {
},
testOptions));
- // In most cases we expect this to fail because it tries to insert a document that is too large.
+ // In most cases we expect this to fail because it tries to insert a document that is too large,
+ // or we see a particular error code which happens when the input is too large to reduce.
+ //
// In some cases we may see the javascript execution interrupted because it takes longer than
// our default time limit, so we allow that possibility.
- assert.commandFailedWithCode(res,
- [ErrorCodes.BadValue, ErrorCodes.Interrupted],
- "creating a document larger than 16MB didn't fail");
- if (res.code != ErrorCodes.Interrupted) {
+ const kCannotReduceLargeObjCode = 31392;
+ assert.commandFailedWithCode(
+ res,
+ [ErrorCodes.BadValue, ErrorCodes.Interrupted, kCannotReduceLargeObjCode],
+ "creating a document larger than 16MB didn't fail");
+ // If we see 'BadValue', make sure the message indicates it's the kind of error we were
+ // expecting.
+ if (res.code === ErrorCodes.BadValue) {
assert.lte(
0,
res.errmsg.indexOf("object to insert too large"),
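
Editor's note: the new assertion accepts any one of several error codes, because the oversized mapReduce output can surface as a BadValue insert failure, an interrupted JS execution, or the dedicated cannot-reduce-large-object code. A minimal sketch of the pattern, using a hypothetical helper in place of the test's mapReduce invocation:

// Hypothetical helper standing in for the mapReduce call made by this test.
const res = runOversizedMapReduce();      // hypothetical; assumed to return a command result
const kCannotReduceLargeObjCode = 31392;  // numeric code taken from the hunk above
assert.commandFailedWithCode(
    res,
    [ErrorCodes.BadValue, ErrorCodes.Interrupted, kCannotReduceLargeObjCode],
    "creating a document larger than 16MB didn't fail");
// Only inspect the error message when the failure took the BadValue insert path.
if (res.code === ErrorCodes.BadValue) {
    assert.lte(0, res.errmsg.indexOf("object to insert too large"), res.errmsg);
}
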
diff --git a/jstests/core/query/mr/mr_reduce_merge_other_db.js b/jstests/core/query/mr/mr_reduce_merge_other_db.js
index 5c730ada182cd..78559f6788e4c 100644
--- a/jstests/core/query/mr/mr_reduce_merge_other_db.js
+++ b/jstests/core/query/mr/mr_reduce_merge_other_db.js
@@ -23,7 +23,7 @@ const outDb = db.getMongo().getDB(outDbStr);
const outColl = outDb[outCollStr];
const mapFn = function() {
- for (i = 0; i < this.a.length; i++)
+ for (let i = 0; i < this.a.length; i++)
emit(this.a[i], 1);
};
const reduceFn = function(k, vs) {
diff --git a/jstests/core/query/mr/mr_replace_into_other_db.js b/jstests/core/query/mr/mr_replace_into_other_db.js
index 010b9d114afe9..a807b01f9686b 100644
--- a/jstests/core/query/mr/mr_replace_into_other_db.js
+++ b/jstests/core/query/mr/mr_replace_into_other_db.js
@@ -25,7 +25,7 @@ assert.commandWorked(outDb.random_coll.insert({val: 1}));
const outColl = outDb[outCollStr];
const mapFn = function() {
- for (i = 0; i < this.a.length; i++)
+ for (let i = 0; i < this.a.length; i++)
emit(this.a[i], 1);
};
const reduceFn = function(k, vs) {
diff --git a/jstests/core/query/mr/mr_sort.js b/jstests/core/query/mr/mr_sort.js
index 11ced9f2722f8..b01eb832e7168 100644
--- a/jstests/core/query/mr/mr_sort.js
+++ b/jstests/core/query/mr/mr_sort.js
@@ -11,7 +11,7 @@
// requires_scripting,
// ]
-t = db.mr_sort;
+let t = db.mr_sort;
t.drop();
t.createIndex({x: 1});
@@ -27,15 +27,15 @@ t.insert({x: 7});
t.insert({x: 5});
t.insert({x: 6});
-m = function() {
+let m = function() {
emit("a", this.x);
};
-r = function(k, v) {
+let r = function(k, v) {
return Array.sum(v);
};
-out = db.mr_sort_out;
+let out = db.mr_sort_out;
assert.commandWorked(t.mapReduce(m, r, out.getName()));
assert.eq([{_id: "a", value: 55}], out.find().toArray(), "A1");
out.drop();
diff --git a/jstests/core/query/ne/ne1.js b/jstests/core/query/ne/ne1.js
index 5069637eb30ee..5135294a43323 100644
--- a/jstests/core/query/ne/ne1.js
+++ b/jstests/core/query/ne/ne1.js
@@ -1,4 +1,4 @@
-t = db.ne1;
+let t = db.ne1;
t.drop();
t.save({x: 1});
diff --git a/jstests/core/query/ne/ne2.js b/jstests/core/query/ne/ne2.js
index 8f2b3d52f4c63..4c8654560d3d2 100644
--- a/jstests/core/query/ne/ne2.js
+++ b/jstests/core/query/ne/ne2.js
@@ -3,7 +3,7 @@
// assumes_read_concern_local,
// ]
-t = db.jstests_ne2;
+let t = db.jstests_ne2;
t.drop();
t.createIndex({a: 1});
@@ -12,7 +12,7 @@ t.save({a: 0});
t.save({a: 0});
t.save({a: 0.5});
-e = t.find({a: {$ne: 0}}).explain(true);
+let e = t.find({a: {$ne: 0}}).explain(true);
assert.eq(2, e.executionStats.nReturned, 'A');
e = t.find({a: {$gt: -1, $lt: 1, $ne: 0}}).explain(true);
diff --git a/jstests/core/query/ne/ne3.js b/jstests/core/query/ne/ne3.js
index 5c38858c019dd..ccdb06a357f51 100644
--- a/jstests/core/query/ne/ne3.js
+++ b/jstests/core/query/ne/ne3.js
@@ -1,6 +1,6 @@
// don't allow most operators with regex
-t = db.jstests_ne3;
+let t = db.jstests_ne3;
t.drop();
assert.throws(function() {
diff --git a/jstests/core/query/nin/nin.js b/jstests/core/query/nin/nin.js
index b9887e4882899..f327bb1315ba7 100644
--- a/jstests/core/query/nin/nin.js
+++ b/jstests/core/query/nin/nin.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.jstests_nin;
+let t = db.jstests_nin;
t.drop();
function checkEqual(name, key, value) {
@@ -16,7 +16,7 @@ function checkEqual(name, key, value) {
" != " + t.find().count());
}
-doTest = function(n) {
+let doTest = function(n) {
t.save({a: [1, 2, 3]});
t.save({a: [1, 2, 4]});
t.save({a: [1, 8, 5]});
diff --git a/jstests/core/query/nin/nin2.js b/jstests/core/query/nin/nin2.js
index d134f5ad4a496..de373c7119059 100644
--- a/jstests/core/query/nin/nin2.js
+++ b/jstests/core/query/nin/nin2.js
@@ -2,13 +2,13 @@
// Check that $nin is the opposite of $in SERVER-3264
-t = db.jstests_nin2;
+let t = db.jstests_nin2;
t.drop();
// Check various operator types.
function checkOperators(array, inMatches) {
- inCount = inMatches ? 1 : 0;
- notInCount = 1 - inCount;
+ let inCount = inMatches ? 1 : 0;
+ let notInCount = 1 - inCount;
assert.eq(inCount, t.count({foo: {$in: array}}));
assert.eq(notInCount, t.count({foo: {$not: {$in: array}}}));
assert.eq(notInCount, t.count({foo: {$nin: array}}));
diff --git a/jstests/core/query/not/not1.js b/jstests/core/query/not/not1.js
index 0726895ebbd83..8ccfeaf7d8769 100644
--- a/jstests/core/query/not/not1.js
+++ b/jstests/core/query/not/not1.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.not1;
+let t = db.not1;
t.drop();
t.insert({a: 1});
diff --git a/jstests/core/query/null_field_name.js b/jstests/core/query/null_field_name.js
index f90ce65cc872e..58d6441ce1cb0 100644
--- a/jstests/core/query/null_field_name.js
+++ b/jstests/core/query/null_field_name.js
@@ -5,4 +5,4 @@ assert.throws(function() {
assert.throws(function() {
Object.bsonsize({"\0asdf": 1});
-}, [], "null char in field name");
\ No newline at end of file
+}, [], "null char in field name");
diff --git a/jstests/core/query/null_query_semantics.js b/jstests/core/query/null_query_semantics.js
index 71cd4b7c84fcc..2a1369a44caa4 100644
--- a/jstests/core/query/null_query_semantics.js
+++ b/jstests/core/query/null_query_semantics.js
@@ -11,12 +11,9 @@
// not_allowed_with_security_token,
// ]
//
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
function extractAValues(results) {
return results.map(function(res) {
@@ -817,5 +814,4 @@ coll = db.getCollection(collNamePrefix + collCount++);
coll.drop();
assert.commandFailedWithCode(
coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}),
- ErrorCodes.CannotCreateIndex);
-}());
+ ErrorCodes.CannotCreateIndex);
\ No newline at end of file
diff --git a/jstests/core/query/number/numberint.js b/jstests/core/query/number/numberint.js
index 55c923aea7943..f6ed7b5b8d7f4 100644
--- a/jstests/core/query/number/numberint.js
+++ b/jstests/core/query/number/numberint.js
@@ -1,14 +1,14 @@
assert.eq.automsg("0", "new NumberInt()");
-n = new NumberInt(4);
+let n = new NumberInt(4);
assert.eq.automsg("4", "n");
assert.eq.automsg("4", "n.toNumber()");
assert.eq.automsg("8", "n + 4");
assert.eq.automsg("'NumberInt(4)'", "n.toString()");
assert.eq.automsg("'NumberInt(4)'", "tojson( n )");
-a = {};
+let a = {};
a.a = n;
-p = tojson(a);
+let p = tojson(a);
assert.eq.automsg("'{ \"a\" : NumberInt(4) }'", "p");
assert.eq.automsg("NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )");
@@ -63,12 +63,10 @@ assert(NumberInt(1), "to bool a");
// assert( ! NumberInt( 0 ) , "to bool b" );
// create doc with int value in db
-t = db.getCollection("numberint");
+let t = db.getCollection("numberint");
t.drop();
-o = {
- a: NumberInt(42)
-};
+let o = {a: NumberInt(42)};
t.save(o);
assert.eq(42, t.findOne().a, "save doc 1");
@@ -76,7 +74,7 @@ assert.eq(1, t.find({a: {$type: 16}}).count(), "save doc 2");
assert.eq(0, t.find({a: {$type: 1}}).count(), "save doc 3");
// roundtripping
-mod = t.findOne({a: 42});
+let mod = t.findOne({a: 42});
mod.a += 10;
mod.b = "foo";
delete mod._id;
diff --git a/jstests/core/query/number/numberlong.js b/jstests/core/query/number/numberlong.js
index a7dfd014539e1..d3477fd9ce0b8 100644
--- a/jstests/core/query/number/numberlong.js
+++ b/jstests/core/query/number/numberlong.js
@@ -1,14 +1,14 @@
assert.eq.automsg("0", "new NumberLong()");
-n = new NumberLong(4);
+let n = new NumberLong(4);
assert.eq.automsg("4", "n");
assert.eq.automsg("4", "n.toNumber()");
assert.eq.automsg("8", "n + 4");
assert.eq.automsg("'NumberLong(4)'", "n.toString()");
assert.eq.automsg("'NumberLong(4)'", "tojson( n )");
-a = {};
+let a = {};
a.a = n;
-p = tojson(a);
+let p = tojson(a);
assert.eq.automsg("'{ \"a\" : NumberLong(4) }'", "p");
assert.eq.automsg("NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )");
diff --git a/jstests/core/query/number/numberlong2.js b/jstests/core/query/number/numberlong2.js
index be254027b7919..6d04ddd84e97b 100644
--- a/jstests/core/query/number/numberlong2.js
+++ b/jstests/core/query/number/numberlong2.js
@@ -4,7 +4,7 @@
// Test precision of NumberLong values with v1 index code SERVER-3717
-t = db.jstests_numberlong2;
+let t = db.jstests_numberlong2;
t.drop();
t.createIndex({x: 1});
@@ -24,8 +24,8 @@ chk(NumberLong("4503599627370497"));
t.remove({});
-s = "11235399833116571";
-for (i = 99; i >= 0; --i) {
+let s = "11235399833116571";
+for (let i = 99; i >= 0; --i) {
t.save({x: NumberLong(s + i)});
}
diff --git a/jstests/core/query/number/numberlong3.js b/jstests/core/query/number/numberlong3.js
index 0dabdec2a0506..ac89075ee7503 100644
--- a/jstests/core/query/number/numberlong3.js
+++ b/jstests/core/query/number/numberlong3.js
@@ -1,26 +1,26 @@
// Test sorting with long longs and doubles - SERVER-3719
-t = db.jstests_numberlong3;
+let t = db.jstests_numberlong3;
t.drop();
-s = "11235399833116571";
-for (i = 10; i >= 0; --i) {
- n = NumberLong(s + i);
+let s = "11235399833116571";
+for (let i = 10; i >= 0; --i) {
+ let n = NumberLong(s + i);
t.save({x: n});
if (0) { // SERVER-3719
t.save({x: n.floatApprox});
}
}
-ret = t.find().sort({x: 1}).toArray().filter(function(x) {
+let ret = t.find().sort({x: 1}).toArray().filter(function(x) {
return typeof (x.x.floatApprox) != 'undefined';
});
// printjson( ret );
-for (i = 1; i < ret.length; ++i) {
- first = ret[i - 1].x.toString();
- second = ret[i].x.toString();
+for (let i = 1; i < ret.length; ++i) {
+ let first = ret[i - 1].x.toString();
+ let second = ret[i].x.toString();
if (first.length == second.length) {
assert.lte(ret[i - 1].x.toString(), ret[i].x.toString());
}
diff --git a/jstests/core/query/number/numberlong4.js b/jstests/core/query/number/numberlong4.js
index d7d73898b34b9..2cd381cc61c9a 100644
--- a/jstests/core/query/number/numberlong4.js
+++ b/jstests/core/query/number/numberlong4.js
@@ -1,7 +1,7 @@
// Test handling of comparison between long longs and their double approximations in btrees -
// SERVER-3719.
-t = db.jstests_numberlong4;
+let t = db.jstests_numberlong4;
t.drop();
if (0) { // SERVER-3719
@@ -10,9 +10,9 @@ if (0) { // SERVER-3719
Random.setRandomSeed();
- s = "11235399833116571";
- for (i = 0; i < 10000; ++i) {
- n = NumberLong(s + Random.randInt(10));
+ let s = "11235399833116571";
+ for (let i = 0; i < 10000; ++i) {
+ let n = NumberLong(s + Random.randInt(10));
t.insert({x: (Random.randInt(2) ? n : n.floatApprox)});
}
diff --git a/jstests/core/query/objid/objid1.js b/jstests/core/query/objid/objid1.js
index d08089c26dbd9..5c252a9531db3 100644
--- a/jstests/core/query/objid/objid1.js
+++ b/jstests/core/query/objid/objid1.js
@@ -1,18 +1,16 @@
-t = db.objid1;
+let t = db.objid1;
t.drop();
-b = new ObjectId();
+let b = new ObjectId();
assert(b.str, "A");
-a = new ObjectId(b.str);
+let a = new ObjectId(b.str);
assert.eq(a.str, b.str, "B");
t.save({a: a});
assert(t.findOne().a.isObjectId, "C");
assert.eq(a.str, t.findOne().a.str, "D");
-x = {
- a: new ObjectId()
-};
+let x = {a: new ObjectId()};
eval(" y = " + tojson(x));
assert.eq(x.a.str, y.a.str, "E");
diff --git a/jstests/core/query/objid/objid2.js b/jstests/core/query/objid/objid2.js
index 247843b587b3f..d25b1a6d9dc76 100644
--- a/jstests/core/query/objid/objid2.js
+++ b/jstests/core/query/objid/objid2.js
@@ -1,4 +1,4 @@
-t = db.objid2;
+let t = db.objid2;
t.drop();
t.save({_id: 517, a: "hello"});
diff --git a/jstests/core/query/objid/objid3.js b/jstests/core/query/objid/objid3.js
index 12d45530e52cf..79e495e25cb49 100644
--- a/jstests/core/query/objid/objid3.js
+++ b/jstests/core/query/objid/objid3.js
@@ -1,4 +1,4 @@
-t = db.objid3;
+let t = db.objid3;
t.drop();
t.save({a: "bob", _id: 517});
diff --git a/jstests/core/query/objid/objid4.js b/jstests/core/query/objid/objid4.js
index 7513e07702967..de82d397b5830 100644
--- a/jstests/core/query/objid/objid4.js
+++ b/jstests/core/query/objid/objid4.js
@@ -1,13 +1,11 @@
-
-
-o = new ObjectId();
+let o = new ObjectId();
assert(o.str);
-a = new ObjectId(o.str);
+let a = new ObjectId(o.str);
assert.eq(o.str, a.str);
assert.eq(a.str, a.str.toString());
-b = ObjectId(o.str);
+let b = ObjectId(o.str);
assert.eq(o.str, b.str);
assert.eq(b.str, b.str.toString());
diff --git a/jstests/core/query/objid/objid5.js b/jstests/core/query/objid/objid5.js
index c656b286f5bde..c8599509c864b 100644
--- a/jstests/core/query/objid/objid5.js
+++ b/jstests/core/query/objid/objid5.js
@@ -1,11 +1,11 @@
-t = db.objid5;
+let t = db.objid5;
t.drop();
t.save({_id: 5.5});
assert.eq(18, Object.bsonsize(t.findOne()), "A");
-x = db.runCommand({features: 1});
-y = db.runCommand({features: 1, oidReset: 1});
+let x = db.runCommand({features: 1});
+let y = db.runCommand({features: 1, oidReset: 1});
if (!x.ok)
print("x: " + tojson(x));
diff --git a/jstests/core/query/objid/objid7.js b/jstests/core/query/objid/objid7.js
index 4c3505f8965b1..2fc289b600054 100644
--- a/jstests/core/query/objid/objid7.js
+++ b/jstests/core/query/objid/objid7.js
@@ -1,7 +1,6 @@
-
-a = new ObjectId("4c1a478603eba73620000000");
-b = new ObjectId("4c1a478603eba73620000000");
-c = new ObjectId();
+let a = new ObjectId("4c1a478603eba73620000000");
+let b = new ObjectId("4c1a478603eba73620000000");
+let c = new ObjectId();
assert.eq(a.toString(), b.toString(), "A");
assert.eq(a.toString(), "ObjectId(\"4c1a478603eba73620000000\")", "B");
diff --git a/jstests/core/query/or/or1.js b/jstests/core/query/or/or1.js
index d90947d5f8716..df3f1eb0ac415 100644
--- a/jstests/core/query/or/or1.js
+++ b/jstests/core/query/or/or1.js
@@ -1,22 +1,22 @@
-t = db.jstests_or1;
+let t = db.jstests_or1;
t.drop();
-checkArrs = function(a, b) {
+let checkArrs = function(a, b) {
assert.eq(a.length, b.length);
- aStr = [];
- bStr = [];
+ let aStr = [];
+ let bStr = [];
a.forEach(function(x) {
aStr.push(tojson(x));
});
b.forEach(function(x) {
bStr.push(tojson(x));
});
- for (i = 0; i < aStr.length; ++i) {
+ for (let i = 0; i < aStr.length; ++i) {
assert.neq(-1, bStr.indexOf(aStr[i]));
}
};
-doTest = function() {
+let doTest = function() {
t.save({_id: 0, a: 1});
t.save({_id: 1, a: 2});
t.save({_id: 2, b: 1});
@@ -36,10 +36,10 @@ doTest = function() {
t.find({$or: ["a"]}).toArray();
});
- a1 = t.find({$or: [{a: 1}]}).toArray();
+ let a1 = t.find({$or: [{a: 1}]}).toArray();
checkArrs([{_id: 0, a: 1}, {_id: 4, a: 1, b: 1}, {_id: 5, a: 1, b: 2}], a1);
- a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray();
+ let a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray();
checkArrs(
[
{_id: 0, a: 1},
diff --git a/jstests/core/query/or/or2.js b/jstests/core/query/or/or2.js
index 03acfc32174ec..e572255afd1cc 100644
--- a/jstests/core/query/or/or2.js
+++ b/jstests/core/query/or/or2.js
@@ -2,11 +2,8 @@
// assumes_read_concern_local,
// ]
-(function() {
-"use strict";
-
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
const t = db.jstests_or2;
t.drop();
@@ -82,5 +79,4 @@ doTest();
assert(t.drop());
assert.commandWorked(t.createIndex({x: 1, a: 1, b: 1}));
-doTest();
-})();
+doTest();
\ No newline at end of file
diff --git a/jstests/core/query/or/or3.js b/jstests/core/query/or/or3.js
index 57b151d8f4bc0..7502afee49cc6 100644
--- a/jstests/core/query/or/or3.js
+++ b/jstests/core/query/or/or3.js
@@ -2,11 +2,8 @@
// assumes_read_concern_local,
// ]
-(function() {
-"use strict";
-
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
const t = db.jstests_or3;
t.drop();
@@ -81,5 +78,4 @@ doTest();
assert(t.drop());
assert.commandWorked(t.createIndex({x: 1, a: 1, b: 1}));
-doTest();
-})();
+doTest();
\ No newline at end of file
diff --git a/jstests/core/query/or/or5.js b/jstests/core/query/or/or5.js
index 1843c3d6cb770..ba4f529359c5c 100644
--- a/jstests/core/query/or/or5.js
+++ b/jstests/core/query/or/or5.js
@@ -2,7 +2,7 @@
// requires_getmore,
// ]
-t = db.jstests_or5;
+let t = db.jstests_or5;
t.drop();
t.createIndex({a: 1});
@@ -28,7 +28,7 @@ assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).toArray().length");
assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).toArray().length");
assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).toArray().length");
-for (i = 2; i <= 7; ++i) {
+for (var i = 2; i <= 7; ++i) {
assert.eq.automsg("7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( i ).toArray().length");
assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).batchSize( i ).toArray().length");
assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).batchSize( i ).toArray().length");
diff --git a/jstests/core/query/or/or7.js b/jstests/core/query/or/or7.js
index e639a1957392c..414fbff858e3c 100644
--- a/jstests/core/query/or/or7.js
+++ b/jstests/core/query/or/or7.js
@@ -1,6 +1,6 @@
// @tags: [requires_non_retryable_writes]
-t = db.jstests_or7;
+let t = db.jstests_or7;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/or/or8.js b/jstests/core/query/or/or8.js
index f33ef3146ce2d..00d1ea0cbed12 100644
--- a/jstests/core/query/or/or8.js
+++ b/jstests/core/query/or/or8.js
@@ -4,7 +4,7 @@
// missing collection
-t = db.jstests_or8;
+let t = db.jstests_or8;
t.drop();
t.find({"$or": [{"PropA": {"$lt": "b"}}, {"PropA": {"$lt": "b", "$gt": "a"}}]}).toArray();
diff --git a/jstests/core/query/or/or9.js b/jstests/core/query/or/or9.js
index 4938e84ca0443..bcd527bb601cd 100644
--- a/jstests/core/query/or/or9.js
+++ b/jstests/core/query/or/or9.js
@@ -2,7 +2,7 @@
// index skipping and previous index range negation
-t = db.jstests_or9;
+let t = db.jstests_or9;
t.drop();
t.createIndex({a: 1, b: 1});
diff --git a/jstests/core/query/or/or_to_in.js b/jstests/core/query/or/or_to_in.js
index 332a2e2e55936..2f8ce52353fa1 100644
--- a/jstests/core/query/or/or_to_in.js
+++ b/jstests/core/query/or/or_to_in.js
@@ -4,14 +4,11 @@
// This test is not prepared to handle explain output for sharded collections.
// @tags: [
// assumes_unsharded_collection,
-// requires_fcv_63,
+// requires_fcv_70,
// ]
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
var coll = db.orToIn;
coll.drop();
@@ -168,5 +165,4 @@ coll.drop();
assert.commandWorked(db.createCollection("orToIn", {collation: {locale: 'de'}}));
coll = db.orToIn;
assert.commandWorked(coll.insert(data));
-testOrToIn(positiveTestQueries);
-}());
+testOrToIn(positiveTestQueries);
\ No newline at end of file
diff --git a/jstests/core/query/or/orb.js b/jstests/core/query/or/orb.js
index aa75bb97e0be9..1c4a65ad91256 100644
--- a/jstests/core/query/or/orb.js
+++ b/jstests/core/query/or/orb.js
@@ -15,4 +15,4 @@ t.createIndex({a: 1, b: -1});
assert.eq.automsg("1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )");
assert.eq.automsg(
- "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )");
\ No newline at end of file
+ "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )");
diff --git a/jstests/core/query/or/orc.js b/jstests/core/query/or/orc.js
index 001d6f4bc9fe1..bb632e0ce6cb7 100644
--- a/jstests/core/query/or/orc.js
+++ b/jstests/core/query/or/orc.js
@@ -1,7 +1,7 @@
// @tags: [requires_non_retryable_writes]
// test that or duplicates are dropped in certain special cases
-t = db.jstests_orc;
+let t = db.jstests_orc;
t.drop();
// The goal here will be to ensure the full range of valid values is scanned for each or clause, in
diff --git a/jstests/core/query/or/ore.js b/jstests/core/query/or/ore.js
index 756db6215c59f..775d5e3b2c113 100644
--- a/jstests/core/query/or/ore.js
+++ b/jstests/core/query/or/ore.js
@@ -1,7 +1,7 @@
// verify that index direction is considered when deduping based on an earlier
// index
-t = db.jstests_ore;
+let t = db.jstests_ore;
t.drop();
t.createIndex({a: -1});
diff --git a/jstests/core/query/or/org.js b/jstests/core/query/or/org.js
index 0c6808330c8ee..4780c54582bf2 100644
--- a/jstests/core/query/or/org.js
+++ b/jstests/core/query/or/org.js
@@ -2,7 +2,7 @@
// SERVER-2282 $or de duping with sparse indexes
-t = db.jstests_org;
+let t = db.jstests_org;
t.drop();
t.createIndex({a: 1}, {sparse: true});
diff --git a/jstests/core/query/or/orh.js b/jstests/core/query/or/orh.js
index 91ce121e5a4d3..9a870cf27b10c 100644
--- a/jstests/core/query/or/orh.js
+++ b/jstests/core/query/or/orh.js
@@ -2,7 +2,7 @@
// SERVER-2831 Demonstration of sparse index matching semantics in a multi index $or query.
-t = db.jstests_orh;
+let t = db.jstests_orh;
t.drop();
t.createIndex({a: 1}, {sparse: true});
diff --git a/jstests/core/query/or/orj.js b/jstests/core/query/or/orj.js
index 6aabb3c39c7f4..9ff2288dc6bcc 100644
--- a/jstests/core/query/or/orj.js
+++ b/jstests/core/query/or/orj.js
@@ -1,6 +1,6 @@
// Test nested $or clauses SERVER-2585 SERVER-3192
-t = db.jstests_orj;
+let t = db.jstests_orj;
t.drop();
t.save({a: 1, b: 2});
diff --git a/jstests/core/query/or/ork.js b/jstests/core/query/or/ork.js
index 8ce2346b6f322..525181f9061b7 100644
--- a/jstests/core/query/or/ork.js
+++ b/jstests/core/query/or/ork.js
@@ -1,6 +1,6 @@
// SERVER-2585 Test $or clauses within indexed top level $or clauses.
-t = db.jstests_ork;
+let t = db.jstests_ork;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/or/oro.js b/jstests/core/query/or/oro.js
index 5ceda73818ec9..43bd0b87a811d 100644
--- a/jstests/core/query/or/oro.js
+++ b/jstests/core/query/or/oro.js
@@ -5,26 +5,26 @@
// Test $or query with several clauses on separate indexes.
-t = db.jstests_oro;
+let t = db.jstests_oro;
t.drop();
-orClauses = [];
-for (idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a') {
- idx = {};
+let orClauses = [];
+for (let idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a') {
+ let idx = {};
idx[idxKey] = 1;
t.createIndex(idx);
- for (i = 0; i < 200; ++i) {
+ for (let i = 0; i < 200; ++i) {
t.insert(idx);
}
orClauses.push(idx);
}
printjson(t.find({$or: orClauses}).explain());
-c = t.find({$or: orClauses}).batchSize(100);
-count = 0;
+let c = t.find({$or: orClauses}).batchSize(100);
+let count = 0;
while (c.hasNext()) {
- for (i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count)
+ for (let i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count)
;
// Interleave with another operation.
t.stats();
diff --git a/jstests/core/query/or_use_clustered_collection.js b/jstests/core/query/or_use_clustered_collection.js
new file mode 100644
index 0000000000000..fce3f3347cd4d
--- /dev/null
+++ b/jstests/core/query/or_use_clustered_collection.js
@@ -0,0 +1,373 @@
+/**
+ * Verifies that $or queries on clustered collections produce plans with IXSCAN and
+ * CLUSTERED_IXSCAN stages when possible.
+ * @tags: [
+ * requires_fcv_71,
+ * # Explain for the aggregate command cannot run within a multi-document transaction.
+ * does_not_support_transactions,
+ * # Refusing to run a test that issues an aggregation command with explain because it may return
+ * # incomplete results if interrupted by a stepdown.
+ * does_not_support_stepdowns
+ * ]
+ */
+
+import {
+ getAggPlanStages,
+ getPlanStage,
+ getPlanStages,
+ getWinningPlan
+} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+
+const coll = db.or_use_clustered_collection;
+assertDropCollection(db, coll.getName());
+
+// Create a clustered collection and create indexes.
+assert.commandWorked(
+ db.createCollection(coll.getName(), {clusteredIndex: {key: {_id: 1}, unique: true}}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({c: 1}));
+assert.commandWorked(coll.createIndex({b: "text"}));
+
+// Insert documents, and store them to be used later in the test.
+const docs = [];
+const textFields = ["foo", "one", "two", "three", "four", "foo", "foo", "seven", "eight", "nine"];
+const numDocs = textFields.length;
+for (let i = 0; i < numDocs; i++) {
+ docs.push({b: textFields[i], a: i, _id: i, c: i * 2, d: [{e: i * 2}, {g: i / 2}], noIndex: i});
+}
+assert.commandWorked(coll.insertMany(docs));
+
+function haveShardMergeStage(winningPlan, stage = "SHARD_MERGE") {
+ let shardMergeStage = getPlanStages(winningPlan, stage);
+ return shardMergeStage.length > 0;
+}
+
+function assertCorrectResults({query, expectedDocIds, projection, limit, skip}) {
+ // Test different find queries. With and without a sort, and with and without a projection.
+ let results = query.toArray();
+ let expectedResults = [];
+ // Create the document set that we expect.
+ if (skip) {
+ // Confirm we only skipped 1 document.
+ assert.eq(results.length, expectedDocIds.length - 1);
+ // Remove the document that was skipped.
+ expectedDocIds = expectedDocIds.filter(id => results.some(el => el["_id"] == id));
+ }
+ expectedDocIds.forEach(id => projection
+ ? expectedResults.push({"_id": docs[id]["_id"], "a": docs[id]["a"]})
+ : expectedResults.push(docs[id]));
+ if (limit) {
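+ // A limit of 2 may return any two of the expected documents, so check that the results are
+ // distinct and that each one matches its corresponding expected document.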
+ assert.eq(results.length, 2);
+ assert.neq(results[0]["_id"], results[1]["_id"]);
+ for (let i = 0; i < results.length; ++i) {
+ let doc = expectedResults.filter(r => r["_id"] == results[i]["_id"]);
+ assert.eq(1, doc.length);
+ assert.docEq(doc[0], results[i]);
+ }
+ return;
+ }
+
+ assert.sameMembers(results, expectedResults);
+}
+
+// $or query which uses a clustered collection scan plan for one branch and secondary index plan for
+// the other, and returns no matching documents.
+assertCorrectResults({query: coll.find({$or: [{_id: 123}, {a: 11}]}), expectedDocIds: []});
+
+// $or query which uses a clustered collection scan plan and a secondary index plan, and each predicate
+// matches some of the documents.
+assertCorrectResults(
+ {query: coll.find({$or: [{_id: 9}, {a: {$lte: 3}}]}), expectedDocIds: [0, 1, 2, 3, 9]});
+
+// $or query which uses a clustered collection scan plan and secondary index plan, and some
+// documents match both predicates.
+assertCorrectResults(
+ {query: coll.find({$or: [{_id: {$lt: 2}}, {a: {$lte: 3}}]}), expectedDocIds: [0, 1, 2, 3]});
+
+// $or query that uses two clustered collection scan plans.
+assertCorrectResults(
+ {query: coll.find({$or: [{_id: {$lt: 2}}, {_id: {$gt: 8}}]}), expectedDocIds: [0, 1, 9]});
+
+// $or query that uses two secondary index scan plans.
+assertCorrectResults(
+ {query: coll.find({$or: [{a: {$lt: 2}}, {a: {$gt: 8}}]}), expectedDocIds: [0, 1, 9]});
+
+function validateQueryPlan({query, expectedStageCount, expectedDocIds, noFetchWithCount}) {
+ // TODO SERVER-77601 add coll.find(query).sort({_id: 1}) to 'testCases'.
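+ // Each test case pairs an explain of a query variant with the equivalent actual query and the
+ // extra stages (e.g. LIMIT, SKIP, PROJECTION_SIMPLE, GROUP, COUNT) that the variant adds.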
+ const testCases = [
+ {
+ explainQuery: coll.explain().find(query).finish(),
+ additionalStages: {},
+ actualQuery: coll.find(query)
+ },
+ {
+ explainQuery: coll.explain().find(query, {_id: 1, a: 1}).limit(2).finish(),
+ additionalStages: {"LIMIT": 1, "PROJECTION_SIMPLE": 1},
+ actualQuery: coll.find(query, {_id: 1, a: 1}).limit(2),
+ },
+ {
+ explainQuery: coll.explain().find(query).limit(2).finish(),
+ additionalStages: {"LIMIT": 1},
+ actualQuery: coll.find(query).limit(2),
+ },
+ {
+ explainQuery: coll.explain().find(query).skip(1).finish(),
+ additionalStages: {"SKIP": 1},
+ actualQuery: coll.find(query).skip(1),
+ },
+ {
+ explainQuery: coll.explain().aggregate([{$match: query}, {$project: {_id: 1, a: 1}}]),
+ additionalStages: {"PROJECTION_SIMPLE": 1},
+ actualQuery: coll.aggregate([{$match: query}, {$project: {_id: 1, a: 1}}]),
+ aggregate: true,
+ },
+ {
+ explainQuery: coll.explain().aggregate(
+ [{$match: query}, {$group: {_id: null, count: {$sum: 1}}}]),
+ additionalStages: {"GROUP": 1},
+ actualQuery: coll.aggregate([{$match: query}, {$group: {_id: null, count: {$sum: 1}}}]),
+ aggregate: true
+ },
+ {
+ explainQuery: coll.explain().find(query).count(),
+ additionalStages: {"COUNT": 1},
+ actualQuery: coll.find(query).count(),
+ }
+ ];
+
+ testCases.forEach(test => {
+ const explain = test.explainQuery;
+
+ // If there is a 'SHARD_MERGE' stage or 'shards', then we should expect more than our
+ // 'expectedStageCount', since each stage will appear for each shard.
+ const shardMergeStage = getPlanStage(explain, "SHARD_MERGE");
+ const shards = "shards" in explain;
+
+ // There won't be a 'FETCH' stage if we have a 'COUNT' or 'GROUP' stage with just index scan
+ // plans.
+ const count = test.additionalStages.hasOwnProperty('COUNT');
+ const fetch = expectedStageCount.hasOwnProperty('FETCH');
+ const group = test.additionalStages.hasOwnProperty('GROUP');
+ if (noFetchWithCount && (count || group) && fetch) {
+ expectedStageCount["FETCH"] = 0;
+ }
+
+ // Unlike SBE, the classic engine does not have a GROUP stage for $group.
+ if (group && !checkSBEEnabled(db)) {
+ test.additionalStages["GROUP"] = 0;
+ }
+
+ // Validate all the stages appear the correct number of times in the winning plan.
+ const expectedStages = Object.assign({}, expectedStageCount, test.additionalStages);
+ for (let stage in expectedStages) {
+ let planStages =
+ test.aggregate ? getAggPlanStages(explain, stage) : getPlanStages(explain, stage);
+ assert(planStages, tojson(explain));
+ if (shardMergeStage || shards) {
+ assert.gte(planStages.length,
+ expectedStages[stage],
+ "Expected " + stage + " to appear, but got plan: " + tojson(explain));
+ } else {
+ assert.eq(planStages.length,
+ expectedStages[stage],
+ "Expected " + stage + " to appear, but got plan: " + tojson(explain));
+ }
+ }
+
+ const projection = test.additionalStages.hasOwnProperty('PROJECTION_SIMPLE');
+ const limit = test.additionalStages.hasOwnProperty('LIMIT');
+ const skip = test.additionalStages.hasOwnProperty('SKIP');
+ if (count || group) {
+ // If we have a GROUP stage, we are in an aggregation pipeline.
+ let results = group ? test.actualQuery.toArray()[0]["count"] : test.actualQuery;
+ assert.eq(expectedDocIds.length,
+ results,
+ "Expected " + expectedDocIds.length.toString() + " number of docs, but got " +
+ tojson(test.actualQuery));
+ } else {
+ assertCorrectResults({
+ query: test.actualQuery,
+ expectedDocIds: expectedDocIds,
+ projection: projection,
+ limit: limit,
+ skip: skip,
+ });
+ }
+ });
+}
+
+// Validates that we use an OR stage with the correct plans for each child branch.
+function validateQueryOR({query, expectedStageCount, expectedDocIds, noFetchWithCount}) {
+ expectedStageCount["OR"] = 1;
+ validateQueryPlan({
+ query: query,
+ expectedStageCount: expectedStageCount,
+ expectedDocIds: expectedDocIds,
+ noFetchWithCount: noFetchWithCount
+ });
+}
+
+// $or with a CLUSTERED_IXSCAN stage and an IXSCAN stage.
+validateQueryOR({
+ query: {$or: [{_id: {$lt: 2}}, {a: 5}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1},
+ expectedDocIds: [0, 1, 5],
+});
+
+validateQueryOR({
+ query: {$or: [{_id: 5}, {a: 6}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1},
+ expectedDocIds: [5, 6],
+});
+
+// $or with two IXSCAN stages.
+validateQueryOR({
+ query: {$or: [{c: {$gte: 10}}, {a: 0}]},
+ expectedStageCount: {"IXSCAN": 2, "FETCH": 1},
+ expectedDocIds: [0, 5, 6, 7, 8, 9],
+ // Count queries have an optimization that skips the FETCH stage for plain IXSCAN plans; this
+ // optimization does not exist for plans with clustered indexes.
+ noFetchWithCount: true
+});
+
+// $or with 2 CLUSTERED_IXSCAN stages.
+validateQueryOR({
+ query: {$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 2},
+ expectedDocIds: [0, 9]
+});
+
+validateQueryOR({
+ query: {$or: [{_id: {$gt: 5}}, {_id: 8}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 2},
+ expectedDocIds: [6, 7, 8, 9]
+});
+
+// $or with many child branches that are either IXSCAN or CLUSTERED_IXSCAN stages. Note that we
+// expect our IXSCAN nodes to be optimized down to one stage.
+validateQueryOR({
+ query: {$or: [{_id: {$gt: 5}}, {_id: 8}, {a: 1}, {a: 1}, {a: {$gte: 8}}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 2, "IXSCAN": 1},
+ expectedDocIds: [1, 6, 7, 8, 9]
+});
+
+// $or with many child branches that are either IXSCAN or CLUSTERED_IXSCAN stages.
+validateQueryOR({
+ query: {$or: [{_id: {$gt: 7}}, {_id: 8}, {a: 1}, {a: {$gte: 8}}, {c: {$lt: 10}}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 2, "IXSCAN": 2},
+ expectedDocIds: [0, 1, 2, 3, 4, 8, 9]
+});
+
+// $or query where the branch of the clustered collection scan is not a leaf node.
+validateQueryOR({
+ query: {$or: [{a: 1}, {$and: [{_id: {$gt: 7}}, {_id: {$lt: 10}}]}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1},
+ expectedDocIds: [1, 8, 9]
+});
+
+// $or inside an $and should not change, and still use a FETCH with an IXSCAN.
+validateQueryPlan({
+ query: {$and: [{a: {$gte: 8}}, {$or: [{_id: 2}, {c: {$gt: 10}}]}]},
+ expectedStageCount: {"FETCH": 1, "IXSCAN": 1, "OR": 0},
+ expectedDocIds: [8, 9],
+});
+
+// $or that can use neither the clustered collection nor another index should still fall back to COLLSCAN.
+validateQueryPlan({
+ query: {$or: [{noIndex: 3}, {_id: 1}]},
+ expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+ expectedDocIds: [1, 3],
+});
+
+validateQueryPlan({
+ query: {$or: [{noIndex: 3}, {a: 1}]},
+ expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+ expectedDocIds: [1, 3],
+});
+
+// $or inside an $elemMatch that is not indexed should not change, and still use a COLLSCAN.
+validateQueryPlan({
+ query: {d: {$elemMatch: {$or: [{e: 6}, {g: 2}]}}},
+ expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+ expectedDocIds: [3, 4]
+});
+
+// $or inside an $elemMatch that is indexed should use only IXSCAN.
+assert.commandWorked(coll.createIndex({"d.e": 1}));
+assert.commandWorked(coll.createIndex({"d.g": 1}));
+validateQueryOR({
+ query: {d: {$elemMatch: {$or: [{e: 10}, {g: 4}]}}},
+ expectedStageCount: {"IXSCAN": 2, "COLLSCAN": 0},
+ expectedDocIds: [5, 8],
+});
+
+// TODO SERVER-77601 remove this function, once supported in SBE.
+// MERGE_SORT plans with clustered collection scans are not allowed, so the plan should fall
+// back to using a collection scan.
+function validateQuerySort() {
+ let explain =
+ coll.explain().find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]}).sort({_id: 1}).finish();
+ const winningPlan = getWinningPlan(explain.queryPlanner);
+ let expectedStageCount = {"MERGE_SORT": 0, "COLLSCAN": 1, "CLUSTERED_IXSCAN": 0, "OR": 0};
+ const shardMergeStage = haveShardMergeStage(winningPlan, "SHARD_MERGE_SORT");
+ const shards = "shards" in winningPlan;
+ for (var stage in expectedStageCount) {
+ let planStages = getPlanStages(winningPlan, stage);
+ assert(planStages, tojson(winningPlan));
+ if (shardMergeStage || shards) {
+ assert.gte(planStages.length,
+ expectedStageCount[stage],
+ "Expected " + stage + " to appear, but got plan: " + tojson(winningPlan));
+ } else {
+ assert.eq(planStages.length,
+ expectedStageCount[stage],
+ "Expected " + stage + " to appear, but got plan: " + tojson(winningPlan));
+ }
+ }
+ assertCorrectResults({
+ query: coll.find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]}).sort({_id: 1}),
+ expectedDocIds: [0, 9]
+ });
+}
+validateQuerySort();
+
+//
+// These tests validate that $or queries with a text index work.
+//
+
+// Basic case: $or with $text and a clustered collection scan.
+validateQueryOR({
+ query: {$or: [{$text: {$search: "foo"}}, {_id: 1}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 1, "TEXT_MATCH": 1, "IXSCAN": 1},
+ expectedDocIds: [0, 1, 5, 6]
+});
+
+// $or with a text index works with a clustered collection scan plan and a secondary index scan plan.
+// We expect 2 IXSCAN nodes because the TEXT_MATCH stage has an IXSCAN node child, and there is an
+// index scan plan for the {a: 9} predicate.
+validateQueryOR({
+ query: {$or: [{$text: {$search: "foo"}}, {_id: {$lt: 2}}, {a: 9}]},
+ expectedStageCount: {"CLUSTERED_IXSCAN": 1, "TEXT_MATCH": 1, "IXSCAN": 2},
+ expectedDocIds: [0, 1, 5, 6, 9]
+});
+
+// $or inside an $and with a text index works.
+validateQueryPlan({
+ query: {$and: [{a: {$gte: 8}}, {$or: [{$text: {$search: "foo"}}, {c: {$gt: 10}}]}]},
+ expectedStageCount: {"FETCH": 2, "IXSCAN": 2, "TEXT_MATCH": 1},
+ expectedDocIds: [8, 9],
+});
+
+// $or inside an $or with a text index works.
+validateQueryOR({
+ query: {$or: [{_id: {$gte: 8}}, {$or: [{$text: {$search: "foo"}}, {c: {$gt: 10}}]}]},
+ expectedStageCount: {"FETCH": 2, "IXSCAN": 2, "TEXT_MATCH": 1, "CLUSTERED_IXSCAN": 1},
+ expectedDocIds: [0, 5, 6, 7, 8, 9],
+});
+
+// $or with a text index and an unindexed field should still fail.
+const err =
+ assert.throws(() => coll.find({$or: [{$text: {$search: "foo"}}, {noIndex: 1}]}).toArray());
+assert.commandFailedWithCode(err, ErrorCodes.NoQueryExecutionPlans);
diff --git a/jstests/core/query/partial_index_logical.js b/jstests/core/query/partial_index_logical.js
index fad4d87136676..b9f3f210b8b29 100644
--- a/jstests/core/query/partial_index_logical.js
+++ b/jstests/core/query/partial_index_logical.js
@@ -3,8 +3,6 @@
* containing logical expressions ($and, $or).
*
* @tags: [
- * # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
* # Since the plan cache is per-node state, this test assumes that all operations are happening
* # against the same mongod.
* assumes_read_preference_unchanged,
@@ -18,10 +16,7 @@
* tenant_migration_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
const coll = db[jsTestName()];
coll.drop();
@@ -174,5 +169,4 @@ coll.drop();
// a very similar shape, however the predicate parameters are not satisfied by the partial
// filter expression.
assert.eq(2, coll.find({num: {$gt: 0, $lt: 12}}).itcount());
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
index 941ec0106b597..42d63a43d894d 100644
--- a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
+++ b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
@@ -18,20 +18,17 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// ]
-(function() {
-'use strict';
-
load("jstests/libs/profiler.js"); // getLatestProfileEntry.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is disabled");
- return;
+ quit();
}
-const testDb = db.getSiblingDB('cached_plan_trial_does_not_discard_work');
+const testDb = db.getSiblingDB('trial_does_not_discard_work');
assert.commandWorked(testDb.dropDatabase());
const coll = testDb.getCollection('test');
@@ -129,5 +126,4 @@ assert.eq(numResults, 0);
const replanProfileEntry = getLatestProfilerEntry(
testDb, {'command.find': coll.getName(), 'command.comment': lastComment});
-assert(replanProfileEntry.replanned, replanProfileEntry);
-}());
+assert(replanProfileEntry.replanned, replanProfileEntry);
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/collation_plan_cache.js b/jstests/core/query/plan_cache/collation_plan_cache.js
index 99e983dd2fa86..9a96ae27a6634 100644
--- a/jstests/core/query/plan_cache/collation_plan_cache.js
+++ b/jstests/core/query/plan_cache/collation_plan_cache.js
@@ -9,16 +9,16 @@
// # former operation may be routed to a secondary in the replica set, whereas the latter must be
// # routed to the primary.
// assumes_read_preference_unchanged,
+// # Make sure to obtain stable counts. Background tasks may create plan cache entries.
+// assumes_standalone_mongod,
// assumes_unsharded_collection,
// does_not_support_stepdowns,
// requires_fcv_61,
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// ]
-(function() {
-'use strict';
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromExplain.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromExplain} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
var coll = db.collation_plan_cache;
coll.drop();
@@ -227,4 +227,3 @@ assert.commandWorked(coll.runCommand('planCacheClearFilters',
assert.eq(0,
coll.runCommand('planCacheListFilters').filters.length,
'unexpected number of plan cache filters');
-})();
diff --git a/jstests/core/query/plan_cache/plan_cache_clear.js b/jstests/core/query/plan_cache/plan_cache_clear.js
index d03330ab08ec7..12d8c48137207 100644
--- a/jstests/core/query/plan_cache/plan_cache_clear.js
+++ b/jstests/core/query/plan_cache/plan_cache_clear.js
@@ -18,23 +18,71 @@
// # The SBE plan cache was first enabled in 6.3.
// requires_fcv_63,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
+// references_foreign_collection,
// ]
-(function() {
-'use strict';
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromPipeline, getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+load("jstests/libs/fixture_helpers.js");
const coll = db.jstests_plan_cache_clear;
coll.drop();
-function numPlanCacheEntries(coll) {
- return coll.aggregate([{$planCacheStats: {}}]).itcount();
+function planCacheContainsQuerySet(curCache, collArg, expectedQuerySetSize) {
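+ // 'curCache' mirrors the expected plan cache contents: check that every tracked key is still
+ // present in the server-side plan cache and that the tracked set has the expected size.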
+ const keyHashes = Array.from(curCache);
+ const res =
+ collArg.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: {$in: keyHashes}}}])
+ .toArray();
+ return (res.length == curCache.size && curCache.size == expectedQuerySetSize);
+}
+
+// Run query 'queryArg' against collection 'collArg' and add its plan cache key to the set
+// 'curCache'. Check that the query produced 'resCount' results, and that 'curCache' contains
+// 'numCachedQueries' entries.
+// Essentially, this function runs a query and adds it both to the query cache and to the set
+// 'curCache', which should mirror the queries in the query cache.
+// This allows the test to keep 'curCache' in sync with the query cache.
+function addToQueryCache(
+ {queryArg = {}, projectArg = {}, collArg, resCount, curCache, numCachedQueries}) {
+ let keyHash = '';
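+ // An array 'queryArg' is treated as an aggregation pipeline; any other value is used as a find filter.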
+ if (queryArg instanceof Array) {
+ assert.eq(resCount, collArg.aggregate(queryArg).toArray().length);
+ keyHash = getPlanCacheKeyFromPipeline(queryArg, collArg, db);
+ } else {
+ assert.eq(resCount, collArg.find(queryArg, projectArg).itcount());
+ keyHash = getPlanCacheKeyFromShape(
+ {query: queryArg, projection: projectArg, collection: collArg, db: db});
+ }
+ curCache.add(keyHash);
+ assert.eq(curCache.size, numCachedQueries);
+}
+
+// Remove a query both from the query cache and from 'curCache'.
+// In this way both are kept in sync.
+function deleteFromQueryCache(queryArg, collArg, curCache) {
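+ // Capture the plan cache keys before and after 'planCacheClear' and remove from 'curCache'
+ // whichever keys the server actually dropped.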
+ const beforeClearKeys =
+ collArg.aggregate([{$planCacheStats: {}}, {$project: {planCacheKey: 1}}])
+ .toArray()
+ .map(k => k.planCacheKey);
+ assert.commandWorked(collArg.runCommand('planCacheClear', {query: queryArg}));
+ const afterClearKeys = collArg.aggregate([{$planCacheStats: {}}, {$project: {planCacheKey: 1}}])
+ .toArray()
+ .map(k => k.planCacheKey);
+ for (let key of beforeClearKeys) {
+ if (!afterClearKeys.includes(key)) {
+ curCache.delete(key);
+ }
+ }
+}
+
+function clearQueryCaches(collArg, curCache) {
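+ // Clear both the server-side plan cache for 'collArg' and the mirrored set of keys.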
+ assert.commandWorked(collArg.runCommand('planCacheClear', {}));
+ curCache.clear();
}
-function dumpPlanCacheState(coll) {
- return coll.aggregate([{$planCacheStats: {}}]).toArray();
+function dumpPlanCacheState(collArg) {
+ return collArg.aggregate([{$planCacheStats: {}}]).toArray();
}
assert.commandWorked(coll.insert({a: 1, b: 1}));
@@ -46,42 +94,72 @@ assert.commandWorked(coll.insert({a: 2, b: 2}));
assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
+// The queries in this set are expected to be in the query cache at any time.
+const cachedQueries = new Set();
+
// Run a query so that an entry is inserted into the cache.
-assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+});
// Invalid key should be a no-op.
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {unknownfield: 1}}));
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+deleteFromQueryCache({unknownfield: 1}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Introduce a second plan cache entry.
-assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
-assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+addToQueryCache({
+ queryArg: {a: 1, b: 1, c: 1},
+ collArg: coll,
+ resCount: 0,
+ curCache: cachedQueries,
+ numCachedQueries: 2
+});
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll));
// Drop one of the two shapes from the cache.
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1}}),
- dumpPlanCacheState(coll));
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+deleteFromQueryCache({a: 1, b: 1}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Drop the second shape from the cache.
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1, c: 1}}),
- dumpPlanCacheState(coll));
-assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+deleteFromQueryCache({a: 1, b: 1, c: 1}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll));
// planCacheClear can clear $expr queries.
-assert.eq(1, coll.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount());
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
-assert.commandWorked(
- coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}}));
-assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+addToQueryCache({
+ queryArg: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+});
+deleteFromQueryCache({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll));
// planCacheClear fails with an $expr query with an unbound variable.
assert.commandFailed(
coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}}));
// Insert two more shapes into the cache.
-assert.eq(1, coll.find({a: 1, b: 1}).itcount());
-assert.eq(1, coll.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount());
-assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+});
+addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ projectArg: {_id: 0, a: 1},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 2
+});
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll));
// Error cases.
assert.commandFailedWithCode(coll.runCommand('planCacheClear', {query: 12345}),
@@ -98,8 +176,7 @@ assert.commandFailedWithCode(coll.runCommand('planCacheClear', {projection: {_id
ErrorCodes.BadValue);
// Drop query cache. This clears all cached queries in the collection.
-assert.commandWorked(coll.runCommand('planCacheClear'));
-assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+clearQueryCaches(coll, cachedQueries);
// Clearing the plan cache for a non-existent collection should succeed.
const nonExistentColl = db.plan_cache_clear_nonexistent;
@@ -111,6 +188,7 @@ if (checkSBEEnabled(db)) {
// collections, when $lookup is pushed down into SBE.
const foreignColl = db.plan_cache_clear_foreign;
foreignColl.drop();
+ const foreignCachedQueries = new Set();
// We need two indices so that the multi-planner is executed.
assert.commandWorked(foreignColl.createIndex({b: 1}));
@@ -126,86 +204,151 @@ if (checkSBEEnabled(db)) {
// Test case 1: clear plan cache on the main collection.
//
// Run a query against the 'foreignColl' and make sure it's cached.
- assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount());
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ collArg: foreignColl,
+ resCount: 0,
+ curCache: foreignCachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
// Run the '$lookup' query and make sure it's cached.
- let results = coll.aggregate(pipeline).toArray();
- assert.eq(3, results.length, results);
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: pipeline,
+ collArg: coll,
+ resCount: 3,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Drop query cache on the main collection. This clears all cached queries in the main
// collection only.
- assert.commandWorked(coll.runCommand("planCacheClear"));
- assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ clearQueryCaches(coll, cachedQueries);
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
// Test case 2: clear plan cache on the foreign collection.
//
// Run the '$lookup' query again and make sure it's cached.
- results = coll.aggregate(pipeline).toArray();
- assert.eq(3, results.length, results);
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: pipeline,
+ collArg: coll,
+ resCount: 3,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Drop query cache on the foreign collection. Make sure that the plan cache on the main
// collection is not affected.
- assert.commandWorked(foreignColl.runCommand("planCacheClear"));
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
- assert.eq(0, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ clearQueryCaches(foreignColl, foreignCachedQueries);
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Test case 3: clear plan cache on the main collection by query shape.
//
// Run a query against the 'foreignColl' and make sure it's cached.
- assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount());
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ addToQueryCache({
+ queryArg: {b: 1, c: 1},
+ collArg: foreignColl,
+ resCount: 0,
+ curCache: foreignCachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
// Run the '$lookup' query and make sure it's cached.
- results = coll.aggregate(pipeline).toArray();
- assert.eq(3, results.length, results);
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: pipeline,
+ collArg: coll,
+ resCount: 3,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Drop query cache by the query shape. This clears all cached queries in the main
// collection only.
- assert.commandWorked(coll.runCommand("planCacheClear", {query: {a: 1}}));
- assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ deleteFromQueryCache(pipeline[0].$match, coll, cachedQueries);
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
// Test case 4: clear plan cache on the foreign collection by (empty) query shape.
//
// Run two queries against the 'foreignColl' and make sure they're cached.
- assert.eq(2, foreignColl.find({}).itcount());
- assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount());
- assert.eq(2, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ addToQueryCache({
+ queryArg: {},
+ collArg: foreignColl,
+ resCount: 2,
+ curCache: foreignCachedQueries,
+ numCachedQueries: 2
+ });
+ addToQueryCache({
+ queryArg: {b: 1, c: 1},
+ collArg: foreignColl,
+ resCount: 0,
+ curCache: foreignCachedQueries,
+ numCachedQueries: 2
+ });
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 2),
+ dumpPlanCacheState(foreignColl));
// Run the '$lookup' query and make sure it's cached.
- results = coll.aggregate(pipeline).toArray();
- assert.eq(3, results.length, results);
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: pipeline,
+ collArg: coll,
+ resCount: 3,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
// Drop query cache on the foreign collection by the query shape. This clears one cached
// query in the foreign collection only.
- assert.commandWorked(foreignColl.runCommand("planCacheClear", {query: {}}));
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ deleteFromQueryCache({}, foreignColl, foreignCachedQueries);
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
// Test case 5: clear by query shape which matches $lookup and non-$lookup queries.
//
// Run the query on the main collection whose plan cache key command shape matches the shape of
// the $lookup query.
- results = coll.aggregate({$match: {a: 1}}).toArray();
- assert.eq(3, results.length, results);
- assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: [{$match: {a: 1}}],
+ collArg: coll,
+ resCount: 3,
+ curCache: cachedQueries,
+ numCachedQueries: 2
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll));
// Run another query on the main collection with a totally different shape.
- results = coll.aggregate({$match: {a: {$in: [1, 2]}}}).toArray();
- assert.eq(4, results.length, results);
- assert.eq(3, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: [{$match: {a: {$in: [1, 2]}}}],
+ collArg: coll,
+ resCount: 4,
+ curCache: cachedQueries,
+ numCachedQueries: 3
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 3), dumpPlanCacheState(coll));
// Drop query cache on the main collection by the query shape. This clears two cached queries in
// the main collection which match the query shape.
- assert.commandWorked(coll.runCommand("planCacheClear", {query: {a: 1}}));
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
- assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl));
+ deleteFromQueryCache({a: 1}, coll, cachedQueries);
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
+ assert.eq(true,
+ planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1),
+ dumpPlanCacheState(foreignColl));
}
//
@@ -219,8 +362,7 @@ if (checkSBEEnabled(db)) {
//
// Make sure the cache is emtpy.
-assert.commandWorked(coll.runCommand('planCacheClear'));
-assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+clearQueryCaches(coll, cachedQueries);
// Case 1: The reIndex rebuilds the index.
// Steps:
@@ -228,13 +370,18 @@ assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
// Run reIndex on the collection.
// Confirm that cache is empty.
// (Only standalone mode supports the reIndex command.)
-const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
-const isStandalone = !isMongos && !db.runCommand({hello: 1}).hasOwnProperty('setName');
+const isStandalone =
+ !FixtureHelpers.isMongos(db) && !db.runCommand({hello: 1}).hasOwnProperty('setName');
if (isStandalone) {
- assert.eq(1, coll.find({a: 1, b: 1}).itcount());
- assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+ addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+ });
+ assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
assert.commandWorked(coll.reIndex());
- assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
}
// Case 2: You add or drop an index.
@@ -242,11 +389,17 @@ if (isStandalone) {
// Populate the cache with 1 entry.
// Add an index.
// Confirm that cache is empty.
-assert.eq(1, coll.find({a: 1, b: 1}).itcount());
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+clearQueryCaches(coll, cachedQueries);
+addToQueryCache({
+ queryArg: {a: 1, b: 1},
+ collArg: coll,
+ resCount: 1,
+ curCache: cachedQueries,
+ numCachedQueries: 1
+});
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
assert.commandWorked(coll.createIndex({b: 1}));
-assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+assert.eq(false, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll));
// Case 3: The mongod process restarts
// Not applicable.
-})();
diff --git a/jstests/core/query/plan_cache/plan_cache_list_plans.js b/jstests/core/query/plan_cache/plan_cache_list_plans.js
index b0f7a24c61530..a0b42a9b2347e 100644
--- a/jstests/core/query/plan_cache/plan_cache_list_plans.js
+++ b/jstests/core/query/plan_cache/plan_cache_list_plans.js
@@ -17,14 +17,16 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
+// references_foreign_collection,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {
+ getPlanCacheKeyFromPipeline,
+ getPlanCacheKeyFromShape,
+ getPlanStage
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
let coll = db.jstests_plan_cache_list_plans;
coll.drop();
@@ -192,4 +194,3 @@ if (!isSbeEnabled) {
const res = foreignColl.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(0, res.length, dumpPlanCacheState());
}
-})();
diff --git a/jstests/core/query/plan_cache/plan_cache_list_shapes.js b/jstests/core/query/plan_cache/plan_cache_list_shapes.js
index 48535eae0c383..e34b4edc52b43 100644
--- a/jstests/core/query/plan_cache/plan_cache_list_shapes.js
+++ b/jstests/core/query/plan_cache/plan_cache_list_shapes.js
@@ -14,15 +14,13 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// ]
-(function() {
-'use strict';
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is enabled.");
- return;
+ quit();
}
const coll = db.jstests_plan_cache_list_shapes;
@@ -89,5 +87,4 @@ regexQuery.s.$options = 'mi';
// There is one more result since the query is now case sensitive.
assert.eq(6, coll.find(regexQuery).itcount());
shapes = getCachedQueryShapes();
-assert.eq(4, shapes.length, shapes);
-})();
+assert.eq(4, shapes.length, shapes);
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/plan_cache_sbe.js b/jstests/core/query/plan_cache/plan_cache_sbe.js
index 2a35b786d70c4..5adb96f51b216 100644
--- a/jstests/core/query/plan_cache/plan_cache_sbe.js
+++ b/jstests/core/query/plan_cache/plan_cache_sbe.js
@@ -23,10 +23,7 @@
* assumes_no_implicit_index_creation,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.plan_cache_sbe;
coll.drop();
@@ -67,5 +64,4 @@ if (isSbeEnabled) {
} else {
assert(!stats.cachedPlan.hasOwnProperty("queryPlan"), stats);
assert(!stats.cachedPlan.hasOwnProperty("slotBasedPlan"), stats);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js b/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js
index 7db4e8ba504d0..22b3550aaeb27 100644
--- a/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js
+++ b/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js
@@ -12,17 +12,14 @@
* # Multiple servers can mess up the plan cache list.
* assumes_standalone_mongod,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTest.log("Skip running the test because SBE is not enabled");
- return;
+ quit();
}
const coll = db.plan_cache_sbe;
@@ -48,5 +45,4 @@ queryAndVerify({}, 2);
// Hinted query cached is reused.
queryAndVerify({a: 1}, 2);
// Query with different hint.
-queryAndVerify({a: 1, b: 1}, 3);
-})();
+queryAndVerify({a: 1, b: 1}, 3);
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
index a102d8a001d9d..3a1384488fe66 100644
--- a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
+++ b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
@@ -13,13 +13,11 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// ]
-(function() {
-'use strict';
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSbeEnabled = checkSBEEnabled(db);
var coll = db.jstests_plan_cache_shell_helpers;
@@ -178,5 +176,4 @@ assertCacheLength(0);
// Verify that explaining a find command does not write to the plan cache.
planCache.clear();
const explain = coll.find(queryB, projectionB).sort(sortC).explain(true);
-assertCacheLength(0);
-}());
+assertCacheLength(0);
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js b/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js
index 69a2214e6b93a..a73722425f095 100644
--- a/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js
+++ b/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js
@@ -9,12 +9,11 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
+// requires_fcv_71,
// ]
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
+import {getPlanCacheKeyFromExplain} from "jstests/libs/analyze_plan.js";
const coll = db.plan_cache_stats_shard_and_host;
coll.drop();
@@ -26,8 +25,20 @@ assert.commandWorked(coll.createIndex({b: 1}));
assert.commandWorked(coll.insert({a: 2, b: 3}));
assert.eq(1, coll.find({a: 2, b: 3}).itcount());
-// List the contents of the plan cache for the collection.
-let planCacheContents = planCache.list();
+const explain = coll.find({a: 2, b: 3}).explain();
+const planCacheKey = getPlanCacheKeyFromExplain(explain, db);
+
+function filterPlanCacheEntriesByKey(planCacheKey, planCacheContents) {
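+ // Keep only the plan cache entries whose 'planCacheKey' matches the key of this test's query.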
+ let filteredPlanCacheEntries = [];
+ for (const entry of planCacheContents) {
+ if (entry.planCacheKey === planCacheKey) {
+ filteredPlanCacheEntries.push(entry);
+ }
+ }
+ return filteredPlanCacheEntries;
+}
+
+let planCacheContents = filterPlanCacheEntriesByKey(planCacheKey, planCache.list());
// We expect every shard that has a chunk for the collection to have produced a plan cache entry.
assert.eq(
@@ -49,11 +60,16 @@ for (const entry of planCacheContents) {
// shard/host. As a future improvement, we should return plan cache information from every host in
// every shard. But for now, we use regular host targeting to choose a particular host in each
// shard.
-planCacheContents = planCache.list([{$group: {_id: "$shard", count: {$sum: 1}}}]);
+planCacheContents = filterPlanCacheEntriesByKey(
+ planCacheKey, planCache.list([{$group: {_id: "$shard", count: {$sum: 1}}}]));
+
for (const entry of planCacheContents) {
assert.eq(entry.count, 1, entry);
}
-planCacheContents = planCache.list([{$group: {_id: "$host", count: {$sum: 1}}}]);
+
+planCacheContents = filterPlanCacheEntriesByKey(
+ planCacheKey, planCache.list([{$group: {_id: "$host", count: {$sum: 1}}}]));
+
for (const entry of planCacheContents) {
assert.eq(entry.count, 1, entry);
}
@@ -61,5 +77,4 @@ for (const entry of planCacheContents) {
// Clear the plan cache and verify that attempting to list the plan cache now returns an empty
// array.
coll.getPlanCache().clear();
-assert.eq([], planCache.list());
-}());
+assert.eq([], filterPlanCacheEntriesByKey(planCacheKey, planCache.list()));
\ No newline at end of file
diff --git a/jstests/core/query/project/proj_key1.js b/jstests/core/query/project/proj_key1.js
index c4834ffad49f4..0d6423156e0a8 100644
--- a/jstests/core/query/project/proj_key1.js
+++ b/jstests/core/query/project/proj_key1.js
@@ -2,12 +2,12 @@
// requires_getmore,
// ]
-t = db.proj_key1;
+let t = db.proj_key1;
t.drop();
-as = [];
+let as = [];
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
as.push({a: i});
t.insert({a: i, b: i});
}
diff --git a/jstests/core/query/project/projection_dotted_paths.js b/jstests/core/query/project/projection_dotted_paths.js
index 7f6eb1bfef2d5..63170fc2bcacb 100644
--- a/jstests/core/query/project/projection_dotted_paths.js
+++ b/jstests/core/query/project/projection_dotted_paths.js
@@ -8,10 +8,7 @@
* Test projections with dotted field paths. Also test that such projections result in covered plans
* when appropriate.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan, isIdhack, isIndexOnly, isIxscan} from "jstests/libs/analyze_plan.js";
load("jstests/aggregation/extras/utils.js"); // arrayEq
let coll = db["projection_dotted_paths"];
@@ -129,4 +126,3 @@ assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}});
assert(arrayEq(coll.find({}, {_id: 0, "a.x": "$a.x", "a.b.x": "$a.x"}).toArray(),
[{a: {x: 1, b: {x: 1}}}]));
}
-}());
diff --git a/jstests/core/query/project/projection_semantics.js b/jstests/core/query/project/projection_semantics.js
index 1f811c84b36ec..c284916f3f8e7 100644
--- a/jstests/core/query/project/projection_semantics.js
+++ b/jstests/core/query/project/projection_semantics.js
@@ -14,19 +14,16 @@
* not_allowed_with_security_token,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"])) {
jsTestLog("Skipping test since columnstore Indexes are not enabled");
- return;
+ quit();
}
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.projection_semantics;
@@ -720,5 +717,4 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes
projection: {measurements: {humidity: 0, time: 0}, _id: 0},
expectedOutput: {measurements: {temperature: 20, pressure: 0.7}},
});
-}());
-}());
+}());
\ No newline at end of file
diff --git a/jstests/core/query/pull/pull.js b/jstests/core/query/pull/pull.js
index 612c65f9e6502..c5d1519f2aa47 100644
--- a/jstests/core/query/pull/pull.js
+++ b/jstests/core/query/pull/pull.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.jstests_pull;
+let t = db.jstests_pull;
t.drop();
t.save({a: [1, 2, 3]});
diff --git a/jstests/core/query/pull/pull2.js b/jstests/core/query/pull/pull2.js
index a1b79955bb0e0..f3c04d517ca67 100644
--- a/jstests/core/query/pull/pull2.js
+++ b/jstests/core/query/pull/pull2.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_fastcount]
-t = db.pull2;
+let t = db.pull2;
t.drop();
t.save({a: [{x: 1}, {x: 1, b: 2}]});
diff --git a/jstests/core/query/pull/pull_or.js b/jstests/core/query/pull/pull_or.js
index 9ef1e091d2942..42b0d93bb1587 100644
--- a/jstests/core/query/pull/pull_or.js
+++ b/jstests/core/query/pull/pull_or.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.pull_or;
+let t = db.pull_or;
t.drop();
-doc = {
- _id: 1,
- a: {b: [{x: 1}, {y: 'y'}, {x: 2}, {z: 'z'}]}
-};
+let doc = {_id: 1, a: {b: [{x: 1}, {y: 'y'}, {x: 2}, {z: 'z'}]}};
t.insert(doc);
diff --git a/jstests/core/query/pull/pull_remove1.js b/jstests/core/query/pull/pull_remove1.js
index ab9368bab3bbc..307d84f237182 100644
--- a/jstests/core/query/pull/pull_remove1.js
+++ b/jstests/core/query/pull/pull_remove1.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.pull_remove1;
+let t = db.pull_remove1;
t.drop();
-o = {
- _id: 1,
- a: [1, 2, 3, 4, 5, 6, 7, 8]
-};
+let o = {_id: 1, a: [1, 2, 3, 4, 5, 6, 7, 8]};
t.insert(o);
assert.eq(o, t.findOne(), "A1");
diff --git a/jstests/core/query/pull/pullall.js b/jstests/core/query/pull/pullall.js
index 2cd0872b0799c..7679bc6db71e0 100644
--- a/jstests/core/query/pull/pullall.js
+++ b/jstests/core/query/pull/pullall.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.jstests_pullall;
+let t = db.jstests_pullall;
t.drop();
t.save({a: [1, 2, 3]});
diff --git a/jstests/core/query/pull/pullall2.js b/jstests/core/query/pull/pullall2.js
index 10e8c89caa16b..e1bc26cb0bd9d 100644
--- a/jstests/core/query/pull/pullall2.js
+++ b/jstests/core/query/pull/pullall2.js
@@ -3,14 +3,11 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.pullall2;
+let t = db.pullall2;
t.drop();
-o = {
- _id: 1,
- a: []
-};
-for (i = 0; i < 5; i++)
+let o = {_id: 1, a: []};
+for (let i = 0; i < 5; i++)
o.a.push({x: i, y: i});
t.insert(o);
diff --git a/jstests/core/query/push/push2.js b/jstests/core/query/push/push2.js
index 10669aa258165..0e00617b1ec6a 100644
--- a/jstests/core/query/push/push2.js
+++ b/jstests/core/query/push/push2.js
@@ -1,14 +1,14 @@
(function() {
-t = db.push2;
+let t = db.push2;
t.drop();
t.save({_id: 1, a: []});
-s = new Array(700000).toString();
+let s = new Array(700000).toString();
-gotError = null;
+let gotError = null;
-for (x = 0; x < 100; x++) {
+for (let x = 0; x < 100; x++) {
print(x + " pushes");
var res = t.update({}, {$push: {a: s}});
gotError = res.hasWriteError();
diff --git a/jstests/core/query/push/push_sort.js b/jstests/core/query/push/push_sort.js
index 2d74a3909f8f7..9ff12d5698c39 100644
--- a/jstests/core/query/push/push_sort.js
+++ b/jstests/core/query/push/push_sort.js
@@ -7,7 +7,7 @@
// test exercises such $sort clause from the shell user's perspective.
//
-t = db.push_sort;
+let t = db.push_sort;
t.drop();
//
diff --git a/jstests/core/query/query1.js b/jstests/core/query/query1.js
index 7e16a03a8fcc0..08a31d8850273 100644
--- a/jstests/core/query/query1.js
+++ b/jstests/core/query/query1.js
@@ -1,14 +1,14 @@
// @tags: [requires_fastcount]
-t = db.query1;
+let t = db.query1;
t.drop();
t.save({num: 1});
t.save({num: 3});
t.save({num: 4});
-num = 0;
-total = 0;
+let num = 0;
+let total = 0;
t.find().forEach(function(z) {
num++;
diff --git a/jstests/core/query/query_hash_stability.js b/jstests/core/query/query_hash_stability.js
index c358f1c7d3455..bd26ac525d3bd 100644
--- a/jstests/core/query/query_hash_stability.js
+++ b/jstests/core/query/query_hash_stability.js
@@ -3,6 +3,10 @@
* across catalog changes.
* @tags: [
* assumes_read_concern_local,
+ * # This test expects query shapes and plans to stay the same at the beginning and
+ * # at the end of the test run. That expectation does not hold when chunks are moving
+ * # randomly across shards.
+ * assumes_balancer_off,
* requires_fcv_51,
* # The test expects the plan cache key on a given node to remain stable. However, the plan
* # cache key is allowed to change between versions. Therefore, this test cannot run in
@@ -10,10 +14,8 @@
* cannot_run_during_upgrade_downgrade,
* ]
*/
-(function() {
-"use strict";
load('jstests/libs/fixture_helpers.js'); // For and isMongos().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const collName = "query_hash_stability";
const coll = db[collName];
@@ -120,5 +122,4 @@ if (!checkSBEEnabled(db)) {
planCacheField: 'planCacheKey',
expectedToMatch: true
});
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/query/queryoptimizera.js b/jstests/core/query/queryoptimizera.js
index 2f3f226684731..42ca0032ea11f 100644
--- a/jstests/core/query/queryoptimizera.js
+++ b/jstests/core/query/queryoptimizera.js
@@ -9,8 +9,8 @@
// constraint is printed at appropriate times. SERVER-5353
function numWarnings() {
- logs = db.adminCommand({getLog: "global"}).log;
- ret = 0;
+ let logs = db.adminCommand({getLog: "global"}).log;
+ let ret = 0;
logs.forEach(function(x) {
if (x.match(warningMatchRegexp)) {
++ret;
@@ -19,23 +19,23 @@ function numWarnings() {
return ret;
}
-collectionNameIndex = 0;
+let collectionNameIndex = 0;
// Generate a collection name not already present in the log.
do {
- testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++;
- warningMatchString =
+ var testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++;
+ var warningMatchString =
'unindexed _id query on capped collection.*collection: test.' + testCollectionName;
- warningMatchRegexp = new RegExp(warningMatchString);
+ var warningMatchRegexp = new RegExp(warningMatchString);
} while (numWarnings() > 0);
-t = db[testCollectionName];
+let t = db[testCollectionName];
t.drop();
-notCappedCollectionName = testCollectionName + '_notCapped';
+let notCappedCollectionName = testCollectionName + '_notCapped';
-notCapped = db.getSiblingDB("local").getCollection(notCappedCollectionName);
+let notCapped = db.getSiblingDB("local").getCollection(notCappedCollectionName);
notCapped.drop();
assert.commandWorked(db.createCollection(testCollectionName, {capped: true, size: 1000}));
@@ -45,14 +45,14 @@ assert.commandWorked(
t.insert({});
notCapped.insert({});
-oldNumWarnings = 0;
+let oldNumWarnings = 0;
function assertNoNewWarnings() {
assert.eq(oldNumWarnings, numWarnings());
}
function assertNewWarning() {
- newNumWarnings = numWarnings();
+ let newNumWarnings = numWarnings();
// Ensure that newNumWarnings > oldNumWarnings. It's not safe to test that oldNumWarnings + 1
// == newNumWarnings, because a (simulated) page fault exception may cause multiple messages to
// be logged instead of only one.
diff --git a/jstests/core/query/regex/regex.js b/jstests/core/query/regex/regex.js
index 363a03db20cff..409c69b0b59f7 100644
--- a/jstests/core/query/regex/regex.js
+++ b/jstests/core/query/regex/regex.js
@@ -1,16 +1,13 @@
// @tags: [
// assumes_read_concern_local,
// ]
+load("jstests/libs/fixture_helpers.js");
(function() {
'use strict';
const t = db.jstests_regex;
-const hello = db.runCommand("hello");
-assert.commandWorked(hello);
-const isMongos = (hello.msg === "isdbgrid");
-
t.drop();
assert.commandWorked(t.save({a: "bcd"}));
assert.eq(1, t.count({a: /b/}), "A");
@@ -48,7 +45,7 @@ assert.eq(1, t.count(query));
const result = t.find(query).explain();
assert.commandWorked(result);
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
assert(result.hasOwnProperty("queryPlanner"));
assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result));
assert.eq(result.queryPlanner.parsedQuery, query);
diff --git a/jstests/core/query/regex/regex2.js b/jstests/core/query/regex/regex2.js
index 6ed1f2d290c3b..4801c7b807058 100644
--- a/jstests/core/query/regex/regex2.js
+++ b/jstests/core/query/regex/regex2.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.regex2;
+let t = db.regex2;
t.drop();
assert.commandWorked(t.save({a: "test"}));
@@ -15,8 +15,8 @@ assert.eq(2, t.find({a: /test/i}).count(), "F");
t.drop();
-a = "\u0442\u0435\u0441\u0442";
-b = "\u0422\u0435\u0441\u0442";
+let a = "\u0442\u0435\u0441\u0442";
+let b = "\u0422\u0435\u0441\u0442";
assert((new RegExp(a)).test(a), "B 1");
assert(!(new RegExp(a)).test(b), "B 2");
diff --git a/jstests/core/query/regex/regex3.js b/jstests/core/query/regex/regex3.js
index b21a7a81435ae..c84f64020e4ab 100644
--- a/jstests/core/query/regex/regex3.js
+++ b/jstests/core/query/regex/regex3.js
@@ -3,7 +3,7 @@
// assumes_read_concern_local,
// ]
-t = db.regex3;
+let t = db.regex3;
t.drop();
assert.commandWorked(t.save({name: "eliot"}));
diff --git a/jstests/core/query/regex/regex4.js b/jstests/core/query/regex/regex4.js
index 0f7963fdd1e9d..d07614b199466 100644
--- a/jstests/core/query/regex/regex4.js
+++ b/jstests/core/query/regex/regex4.js
@@ -3,7 +3,7 @@
// assumes_read_concern_local,
// ]
-t = db.regex4;
+let t = db.regex4;
t.drop();
assert.commandWorked(t.save({name: "eliot"}));
diff --git a/jstests/core/query/regex/regex5.js b/jstests/core/query/regex/regex5.js
index d0836a6268666..ea32b4864df96 100644
--- a/jstests/core/query/regex/regex5.js
+++ b/jstests/core/query/regex/regex5.js
@@ -1,5 +1,5 @@
-t = db.regex5;
+let t = db.regex5;
t.drop();
// Add filler data to make sure that indexed solutions are
@@ -11,10 +11,10 @@ for (var i = 0; i < 10; i++) {
t.save({x: ["abc", "xyz1"]});
t.save({x: ["ac", "xyz2"]});
-a = /.*b.*c/;
-x = /.*y.*/;
+let a = /.*b.*c/;
+let x = /.*y.*/;
-doit = function() {
+let doit = function() {
assert.eq(1, t.find({x: a}).count(), "A");
assert.eq(2, t.find({x: x}).count(), "B");
assert.eq(2, t.find({x: {$in: [x]}}).count(), "C"); // SERVER-322
diff --git a/jstests/core/query/regex/regex6.js b/jstests/core/query/regex/regex6.js
index cc7b507f610cb..168826bbae1af 100644
--- a/jstests/core/query/regex/regex6.js
+++ b/jstests/core/query/regex/regex6.js
@@ -4,7 +4,7 @@
// @tags: [
// assumes_unsharded_collection,
// ]
-t = db.regex6;
+let t = db.regex6;
t.drop();
t.save({name: "eliot"});
diff --git a/jstests/core/query/regex/regex7.js b/jstests/core/query/regex/regex7.js
index c9c5454dbc547..9a7e6153f1a06 100644
--- a/jstests/core/query/regex/regex7.js
+++ b/jstests/core/query/regex/regex7.js
@@ -1,4 +1,4 @@
-t = db.regex_matches_self;
+let t = db.regex_matches_self;
t.drop();
t.insert({r: /^a/});
@@ -23,4 +23,4 @@ assert.eq(/^b/, t.findOne({r: /^b/}).r, '3 1 b');
assert.eq(1, t.count({r: /^b/}), '3 2 b');
t.insert({r: "a"});
-assert.eq(2, t.count({r: /^a/}), 'c');
\ No newline at end of file
+assert.eq(2, t.count({r: /^a/}), 'c');
diff --git a/jstests/core/query/regex/regex8.js b/jstests/core/query/regex/regex8.js
index 20164acf464f1..e57d01db067d7 100644
--- a/jstests/core/query/regex/regex8.js
+++ b/jstests/core/query/regex/regex8.js
@@ -1,4 +1,4 @@
-t = db.regex8;
+let t = db.regex8;
t.drop();
t.insert({_id: 1, a: "abc"});
diff --git a/jstests/core/query/regex/regex9.js b/jstests/core/query/regex/regex9.js
index 96188d689dc71..497b1cbdabc93 100644
--- a/jstests/core/query/regex/regex9.js
+++ b/jstests/core/query/regex/regex9.js
@@ -1,5 +1,5 @@
-t = db.regex9;
+let t = db.regex9;
t.drop();
t.insert({_id: 1, a: ["a", "b", "c"]});
diff --git a/jstests/core/query/regex/regex_distinct.js b/jstests/core/query/regex/regex_distinct.js
index 7852950853c72..7611628aa6fc8 100644
--- a/jstests/core/query/regex/regex_distinct.js
+++ b/jstests/core/query/regex/regex_distinct.js
@@ -7,9 +7,7 @@
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection.
const coll = db.regex_distinct;
@@ -24,4 +22,3 @@ const distinctScanStages =
getPlanStages(coll.explain().distinct("a", {a: {"$regex": "^ab.*"}}), "DISTINCT_SCAN");
assert.eq(distinctScanStages.length, FixtureHelpers.numberOfShardsForCollection(coll));
-})();
diff --git a/jstests/core/query/regex/regex_embed1.js b/jstests/core/query/regex/regex_embed1.js
index 11d92a6f3dc64..97a0cae65525f 100644
--- a/jstests/core/query/regex/regex_embed1.js
+++ b/jstests/core/query/regex/regex_embed1.js
@@ -1,4 +1,4 @@
-t = db.regex_embed1;
+let t = db.regex_embed1;
t.drop();
diff --git a/jstests/core/query/regex/regexa.js b/jstests/core/query/regex/regexa.js
index 694436e87a888..24ea8f906865d 100644
--- a/jstests/core/query/regex/regexa.js
+++ b/jstests/core/query/regex/regexa.js
@@ -1,6 +1,6 @@
// Test simple regex optimization with a regex | (bar) present - SERVER-3298
-t = db.jstests_regexa;
+let t = db.jstests_regexa;
t.drop();
function check() {
diff --git a/jstests/core/query/regex/regexb.js b/jstests/core/query/regex/regexb.js
index 09e3518728b6f..398b9c5b83659 100644
--- a/jstests/core/query/regex/regexb.js
+++ b/jstests/core/query/regex/regexb.js
@@ -1,6 +1,6 @@
// Test more than four regex expressions in a query -- SERVER-969
-t = db.jstests_regexb;
+let t = db.jstests_regexb;
t.drop();
t.save({a: 'a', b: 'b', c: 'c', d: 'd', e: 'e'});
diff --git a/jstests/core/query/rename/rename_operator.js b/jstests/core/query/rename/rename_operator.js
index e86cc077e65df..71965f269bd3c 100644
--- a/jstests/core/query/rename/rename_operator.js
+++ b/jstests/core/query/rename/rename_operator.js
@@ -11,7 +11,7 @@
* ]
*/
-t = db.jstests_rename_operator;
+let t = db.jstests_rename_operator;
t.drop();
function bad(f) {
diff --git a/jstests/core/query/rename/rename_operator_missing_source.js b/jstests/core/query/rename/rename_operator_missing_source.js
index ff219cda56378..d9c087af9aae3 100644
--- a/jstests/core/query/rename/rename_operator_missing_source.js
+++ b/jstests/core/query/rename/rename_operator_missing_source.js
@@ -8,7 +8,7 @@
* ]
*/
-t = db.jstests_rename5;
+let t = db.jstests_rename5;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js b/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js
index 4280d54a700d4..0591eb29408d5 100644
--- a/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js
+++ b/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js
@@ -12,20 +12,16 @@
* requires_fcv_63,
* # Plan cache state is node-local and will not get migrated alongside tenant data.
* tenant_migration_incompatible,
- * cqf_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
-load("jstests/libs/analyze_plan.js"); // For 'getQueryHashFromExplain'.
+import {getPlanCacheKeyFromExplain, getQueryHashFromExplain} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// This test is specifically verifying the behavior of the SBE plan cache, which is only enabled
// when SBE is enabled.
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
- return;
+ quit();
}
const coll = db[jsTestName()];
@@ -87,5 +83,4 @@ assert.eq(newCacheEntry.planCacheKey, planCacheKey, newCacheEntry);
assert.eq(newCacheEntry.queryHash, queryHash, newCacheEntry);
// The query should also return the same results as before.
-assert.eq(results, cacheResults);
-}());
+assert.eq(results, cacheResults);
\ No newline at end of file
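
A related consequence of the module conversion shows up in the hunk above: the old early-exit `return` is not legal at module top level, so migrated tests call `quit()` instead. A small sketch of the pattern, mirroring the SBE guard above:

```js
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

if (!checkSBEEnabled(db)) {
    jsTestLog("Skipping test because SBE is not enabled");
    quit();  // 'return' is only valid inside a function, not at module top level
}

// ...the rest of the test runs only when SBE is enabled...
```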
diff --git a/jstests/core/query/set/set1.js b/jstests/core/query/set/set1.js
index bae41fc5803f3..6914ddc217f92 100644
--- a/jstests/core/query/set/set1.js
+++ b/jstests/core/query/set/set1.js
@@ -1,5 +1,5 @@
-t = db.set1;
+let t = db.set1;
t.drop();
t.insert({_id: 1, emb: {}});
diff --git a/jstests/core/query/set/set2.js b/jstests/core/query/set/set2.js
index c5b6e1c95534e..9f37dd1e6b382 100644
--- a/jstests/core/query/set/set2.js
+++ b/jstests/core/query/set/set2.js
@@ -1,5 +1,5 @@
-t = db.set2;
+let t = db.set2;
t.drop();
t.save({_id: 1, x: true, y: {x: true}});
diff --git a/jstests/core/query/set/set3.js b/jstests/core/query/set/set3.js
index 4af579fa0b9a8..6dab2e3df2ba5 100644
--- a/jstests/core/query/set/set3.js
+++ b/jstests/core/query/set/set3.js
@@ -3,12 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.set3;
+let t = db.set3;
t.drop();
t.insert({"test1": {"test2": {"abcdefghijklmnopqrstu": {"id": 1}}}});
t.update({}, {"$set": {"test1.test2.abcdefghijklmnopqrstuvwxyz": {"id": 2}}});
-x = t.findOne();
+let x = t.findOne();
assert.eq(1, x.test1.test2.abcdefghijklmnopqrstu.id, "A");
assert.eq(2, x.test1.test2.abcdefghijklmnopqrstuvwxyz.id, "B");
diff --git a/jstests/core/query/set/set4.js b/jstests/core/query/set/set4.js
index 99c0913b977d7..c1ccb87d1a046 100644
--- a/jstests/core/query/set/set4.js
+++ b/jstests/core/query/set/set4.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.set4;
+let t = db.set4;
t.drop();
-orig = {
- _id: 1,
- a: [{x: 1}]
-};
+let orig = {_id: 1, a: [{x: 1}]};
t.insert(orig);
t.update({}, {$set: {'a.0.x': 2, 'foo.bar': 3}});
diff --git a/jstests/core/query/set/set5.js b/jstests/core/query/set/set5.js
index a848899f4affc..fbaa832c3a0cd 100644
--- a/jstests/core/query/set/set5.js
+++ b/jstests/core/query/set/set5.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.set5;
+let t = db.set5;
t.drop();
function check(want, err) {
diff --git a/jstests/core/query/set/set6.js b/jstests/core/query/set/set6.js
index 2f82eb40a68f8..ac96cee0228fc 100644
--- a/jstests/core/query/set/set6.js
+++ b/jstests/core/query/set/set6.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.set6;
+let t = db.set6;
t.drop();
-x = {
- _id: 1,
- r: new DBRef("foo", new ObjectId())
-};
+let x = {_id: 1, r: new DBRef("foo", new ObjectId())};
t.insert(x);
assert.eq(x, t.findOne(), "A");
diff --git a/jstests/core/query/set/set7.js b/jstests/core/query/set/set7.js
index e1cdd0f3bf266..232e3de5f8d19 100644
--- a/jstests/core/query/set/set7.js
+++ b/jstests/core/query/set/set7.js
@@ -9,7 +9,7 @@
// test $set with array indices
-t = db.jstests_set7;
+let t = db.jstests_set7;
var res;
t.drop();
diff --git a/jstests/core/query/sort/sort10.js b/jstests/core/query/sort/sort10.js
index 6819c76fa5b33..4369a179fcfbd 100644
--- a/jstests/core/query/sort/sort10.js
+++ b/jstests/core/query/sort/sort10.js
@@ -2,7 +2,7 @@
//
// @tags: [requires_fastcount]
-t = db.sort10;
+let t = db.sort10;
function checkSorting1(opts) {
t.drop();
@@ -20,25 +20,26 @@ checkSorting1({});
checkSorting1({"background": true});
function checkSorting2(dates, sortOrder) {
- cur = t.find().sort({x: sortOrder});
+ let cur = t.find().sort({x: sortOrder});
assert.eq(dates.length, cur.count(), "Incorrect number of results returned");
- index = 0;
+ let index = 0;
while (cur.hasNext()) {
- date = cur.next().x;
+ let date = cur.next().x;
assert.eq(dates[index].valueOf(), date.valueOf());
index++;
}
}
t.drop();
-dates = [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)];
+let dates =
+ [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)];
for (var i = 0; i < dates.length; i++) {
t.insert({x: dates[i]});
}
dates.sort(function(a, b) {
return a - b;
});
-reverseDates = dates.slice(0).reverse();
+let reverseDates = dates.slice(0).reverse();
checkSorting2(dates, 1);
checkSorting2(reverseDates, -1);
diff --git a/jstests/core/query/sort/sort2.js b/jstests/core/query/sort/sort2.js
index 1d373193fccbb..38dd69921c882 100644
--- a/jstests/core/query/sort/sort2.js
+++ b/jstests/core/query/sort/sort2.js
@@ -1,5 +1,5 @@
// test sorting, mainly a very simple test with no index
-t = db.sort2;
+let t = db.sort2;
t.drop();
t.save({x: 1, y: {a: 5, b: 4}});
diff --git a/jstests/core/query/sort/sort6.js b/jstests/core/query/sort/sort6.js
index f2658bbcbada5..9198c2fc1361b 100644
--- a/jstests/core/query/sort/sort6.js
+++ b/jstests/core/query/sort/sort6.js
@@ -1,4 +1,4 @@
-t = db.sort6;
+let t = db.sort6;
function get(x) {
return t.find().sort({c: x}).map(function(z) {
diff --git a/jstests/core/query/sort/sort8.js b/jstests/core/query/sort/sort8.js
index fa0b0040de11d..ab40d4578e300 100644
--- a/jstests/core/query/sort/sort8.js
+++ b/jstests/core/query/sort/sort8.js
@@ -1,15 +1,15 @@
// Check sorting of arrays indexed by key SERVER-2884
-t = db.jstests_sort8;
+let t = db.jstests_sort8;
t.drop();
t.save({a: [1, 10]});
t.save({a: 5});
-unindexedForward = t.find().sort({a: 1}).toArray();
-unindexedReverse = t.find().sort({a: -1}).toArray();
+let unindexedForward = t.find().sort({a: 1}).toArray();
+let unindexedReverse = t.find().sort({a: -1}).toArray();
t.createIndex({a: 1});
-indexedForward = t.find().sort({a: 1}).hint({a: 1}).toArray();
-indexedReverse = t.find().sort({a: -1}).hint({a: 1}).toArray();
+let indexedForward = t.find().sort({a: 1}).hint({a: 1}).toArray();
+let indexedReverse = t.find().sort({a: -1}).hint({a: 1}).toArray();
assert.eq(unindexedForward, indexedForward);
assert.eq(unindexedReverse, indexedReverse);
diff --git a/jstests/core/query/sort/sort9.js b/jstests/core/query/sort/sort9.js
index 57496b40da15d..a606ec237fbbd 100644
--- a/jstests/core/query/sort/sort9.js
+++ b/jstests/core/query/sort/sort9.js
@@ -1,6 +1,6 @@
// Unindexed array sorting SERVER-2884
-t = db.jstests_sort9;
+let t = db.jstests_sort9;
t.drop();
t.save({a: []});
diff --git a/jstests/core/query/sort/sort_array.js b/jstests/core/query/sort/sort_array.js
index fccfe744a7139..3dd52164c84e2 100644
--- a/jstests/core/query/sort/sort_array.js
+++ b/jstests/core/query/sort/sort_array.js
@@ -6,10 +6,7 @@
/**
* Tests for sorting documents by fields that contain arrays.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {aggPlanHasStage, isQueryPlan, planHasStage} from "jstests/libs/analyze_plan.js";
let coll = db.jstests_array_sort;
@@ -307,5 +304,4 @@ testAggAndFindSort({
hint: {"a.x": 1},
expected: [{_id: 1}, {_id: 0}, {_id: 2}],
expectBlockingSort: false
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/query/sort/sort_dotted_paths_collation.js b/jstests/core/query/sort/sort_dotted_paths_collation.js
index b0b91f7126d46..fd9dafacfb994 100644
--- a/jstests/core/query/sort/sort_dotted_paths_collation.js
+++ b/jstests/core/query/sort/sort_dotted_paths_collation.js
@@ -11,6 +11,9 @@
* @tags: [
* does_not_support_transactions,
* assumes_no_implicit_collection_creation_after_drop,
+ * # Fixes behavior which was buggy in 7.0, so multiversion incompatible for now.
+ * # TODO SERVER-76127: Remove this tag.
+ * multiversion_incompatible,
* ]
*/
(function() {
@@ -286,4 +289,42 @@ testSortAndSortWithLimit(
testSortAndSortWithLimit(
{"a.b.c": -1, _id: -1},
[{_id: 8}, {_id: 6}, {_id: 9}, {_id: 5}, {_id: 1}, {_id: 4}, {_id: 7}, {_id: 3}, {_id: 2}]);
+
+// Tests for a case where all values are scalars and the sort components do not have a common
+// parent path.
+assert(coll.drop());
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.commandWorked(coll.insert({_id: 1, a: "a", b: "X"}));
+assert.commandWorked(coll.insert({_id: 2, a: "a", b: "y"}));
+assert.commandWorked(coll.insert({_id: 3, a: "a", b: "Z"}));
+assert.commandWorked(coll.insert({_id: 4, a: "b", b: "x"}));
+assert.commandWorked(coll.insert({_id: 5, a: "B", b: "Y"}));
+assert.commandWorked(coll.insert({_id: 6, a: "B", b: "Z"}));
+testSortAndSortWithLimit({"a": 1, "b": 1},
+ [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}]);
+testSortAndSortWithLimit({"a": 1, "b": -1},
+ [{_id: 3}, {_id: 2}, {_id: 1}, {_id: 6}, {_id: 5}, {_id: 4}]);
+testSortAndSortWithLimit({"a": -1, "b": 1},
+ [{_id: 4}, {_id: 5}, {_id: 6}, {_id: 1}, {_id: 2}, {_id: 3}]);
+testSortAndSortWithLimit({"a": -1, "b": -1},
+ [{_id: 6}, {_id: 5}, {_id: 4}, {_id: 3}, {_id: 2}, {_id: 1}]);
+
+// Tests for a case where all values are scalar and the sort components have a common parent
+// path.
+assert(coll.drop());
+assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive));
+assert.commandWorked(coll.insert({_id: 1, obj: {a: "a", b: "X"}}));
+assert.commandWorked(coll.insert({_id: 2, obj: {a: "a", b: "y"}}));
+assert.commandWorked(coll.insert({_id: 3, obj: {a: "a", b: "Z"}}));
+assert.commandWorked(coll.insert({_id: 4, obj: {a: "b", b: "x"}}));
+assert.commandWorked(coll.insert({_id: 5, obj: {a: "B", b: "Y"}}));
+assert.commandWorked(coll.insert({_id: 6, obj: {a: "B", b: "Z"}}));
+testSortAndSortWithLimit({"obj.a": 1, "obj.b": 1},
+ [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}]);
+testSortAndSortWithLimit({"obj.a": 1, "obj.b": -1},
+ [{_id: 3}, {_id: 2}, {_id: 1}, {_id: 6}, {_id: 5}, {_id: 4}]);
+testSortAndSortWithLimit({"obj.a": -1, "obj.b": 1},
+ [{_id: 4}, {_id: 5}, {_id: 6}, {_id: 1}, {_id: 2}, {_id: 3}]);
+testSortAndSortWithLimit({"obj.a": -1, "obj.b": -1},
+ [{_id: 6}, {_id: 5}, {_id: 4}, {_id: 3}, {_id: 2}, {_id: 1}]);
})();
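
The added cases rely on the collection's case-insensitive default collation: "b" and "B" compare equal, so the tie is broken by the second sort component. A standalone sketch of the same behavior; the collection name and the `strength: 2` collation spec are assumptions chosen to match the usual definition of a case-insensitive collation, not taken from the patch:

```js
const caseInsensitive = {collation: {locale: "en_US", strength: 2}};  // assumed spec
const c = db.collation_sort_sketch;                                   // hypothetical name
c.drop();
assert.commandWorked(db.createCollection(c.getName(), caseInsensitive));
assert.commandWorked(c.insert([{_id: 4, a: "b", b: "x"}, {_id: 5, a: "B", b: "Y"}]));

// "b" and "B" are equal under the default collation, so {a: 1} alone cannot
// order these documents; the secondary key {b: 1} decides, and "x" < "Y"
// case-insensitively, so _id 4 sorts first.
assert.eq([4, 5], c.find().sort({a: 1, b: 1}).map(doc => doc._id));
```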
diff --git a/jstests/core/query/sort/sort_merge.js b/jstests/core/query/sort/sort_merge.js
index fe6e24917d784..2206f7b03ae4e 100644
--- a/jstests/core/query/sort/sort_merge.js
+++ b/jstests/core/query/sort/sort_merge.js
@@ -5,10 +5,7 @@
* assumes_read_concern_local,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getPlanStages, isIndexOnly, isIxscan} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For 'isMongos'.
const collNamePrefix = 'sort_merge_';
@@ -114,7 +111,7 @@ function runTest(sorts, filters, verifyCallback) {
// Check that there are no duplicates.
let ids = new Set();
for (let doc of res) {
- assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(_id));
+ assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(doc._id));
ids.add(doc._id);
}
}
@@ -332,4 +329,3 @@ function runTest(sorts, filters, verifyCallback) {
};
runTest([kSortPattern], [kCoveredFilter], verifyCoveredPlan);
})();
-})();
diff --git a/jstests/core/query/sort/sort_merge_collation.js b/jstests/core/query/sort/sort_merge_collation.js
index 38e6f33d9da64..015b41fedd835 100644
--- a/jstests/core/query/sort/sort_merge_collation.js
+++ b/jstests/core/query/sort/sort_merge_collation.js
@@ -5,10 +5,7 @@
* assumes_no_implicit_collection_creation_after_drop,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
const numericOrdering = {
collation: {locale: "en_US", numericOrdering: true}
@@ -74,7 +71,7 @@ function runTest(sorts, filters) {
// Check that there are no duplicates.
let ids = new Set();
for (let doc of res) {
- assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(_id));
+ assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(doc._id));
ids.add(doc._id);
}
}
@@ -160,5 +157,4 @@ const kSorts = [
];
runTest(kSorts, kFilterPredicates);
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/query/sort/sort_numeric.js b/jstests/core/query/sort/sort_numeric.js
index 5ff3e71a06654..908a9e7184a6b 100644
--- a/jstests/core/query/sort/sort_numeric.js
+++ b/jstests/core/query/sort/sort_numeric.js
@@ -1,5 +1,5 @@
-t = db.sort_numeric;
+let t = db.sort_numeric;
t.drop();
// there are two numeric types in the db; make sure it handles them right
@@ -18,7 +18,7 @@ for (var pass = 0; pass < 2; pass++) {
var c = t.find().sort({a: 1});
var last = 0;
while (c.hasNext()) {
- current = c.next();
+ let current = c.next();
assert(current.a > last);
last = current.a;
}
diff --git a/jstests/core/query/sort/sortb.js b/jstests/core/query/sort/sortb.js
index 7c6abe340b478..1ec0851d4c9eb 100644
--- a/jstests/core/query/sort/sortb.js
+++ b/jstests/core/query/sort/sortb.js
@@ -5,31 +5,40 @@
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-const t = db.jstests_sortb;
+const t = db[jsTestName()];
t.drop();
-t.createIndex({b: 1});
+assert.commandWorked(t.createIndex({b: 1}));
-let i;
-for (i = 0; i < 100; ++i) {
- t.save({a: i, b: i});
-}
+let docs = Array.from({length: 100}, (_, i) => {
+ return {a: i, b: i};
+});
+assert.commandWorked(t.insert(docs));
const numShards = FixtureHelpers.numberOfShardsForCollection(t);
const numLargeDocumentsToWrite = 120 * numShards;
+jsTestLog('numShards = ' + numShards + '; numLargeDocumentsToWrite = ' + numLargeDocumentsToWrite);
+
// These large documents will not be part of the initial set of "top 100" matches, and they will
// not be part of the final set of "top 100" matches returned to the client. However, they are
// an intermediate set of "top 100" matches and should trigger an in memory sort capacity
// exception.
const big = new Array(1024 * 1024).toString();
-for (; i < 100 + numLargeDocumentsToWrite; ++i) {
- t.save({a: i, b: i, big: big});
-}
+docs = Array.from({length: numLargeDocumentsToWrite}, (_, i) => {
+ const k = 100 + i;
+ return {a: k, b: k, big: big};
+});
+assert.commandWorked(t.insert(docs));
+
+docs = Array.from({length: 100}, (_, i) => {
+ const k = 100 + numLargeDocumentsToWrite + i;
+ return {a: k, b: k};
+});
+assert.commandWorked(t.insert(docs));
-for (; i < 200 + numLargeDocumentsToWrite; ++i) {
- t.save({a: i, b: i});
-}
+jsTestLog('Collection ' + t.getFullName() + ' populated with ' + t.countDocuments({}) +
+ ' documents. Checking allowDiskUse=false behavior.');
assert.throwsWithCode(
() => t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).limit(100).itcount(),
@@ -38,5 +47,4 @@ assert.throwsWithCode(
() =>
t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).showDiskLoc().limit(100).itcount(),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
-t.drop();
})();
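
The sortb.js rewrite above swaps per-document `save()` loops for building a whole batch with `Array.from` and inserting it once under `assert.commandWorked`, so write errors fail the test immediately instead of being silently ignored. The same pattern in isolation (the collection name is arbitrary):

```js
const c = db.bulk_insert_sketch;  // hypothetical collection
c.drop();

// Generate {a: 0, b: 0} ... {a: 99, b: 99} in one pass and insert as a batch.
const docs = Array.from({length: 100}, (_, i) => ({a: i, b: i}));
assert.commandWorked(c.insert(docs));
assert.eq(100, c.countDocuments({}));
```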
diff --git a/jstests/core/query/sort/sortd.js b/jstests/core/query/sort/sortd.js
index 525f08ea9470e..39aacb7733111 100644
--- a/jstests/core/query/sort/sortd.js
+++ b/jstests/core/query/sort/sortd.js
@@ -4,12 +4,12 @@
// Test sorting with dups and multiple candidate query plans.
-t = db.jstests_sortd;
+let t = db.jstests_sortd;
function checkNumSorted(n, query) {
- docs = query.toArray();
+ let docs = query.toArray();
assert.eq(n, docs.length);
- for (i = 1; i < docs.length; ++i) {
+ for (let i = 1; i < docs.length; ++i) {
assert.lte(docs[i - 1].a, docs[i].a);
}
}
@@ -31,10 +31,10 @@ t.drop();
t.save({a: 1});
t.save({a: 10});
-for (i = 2; i <= 9; ++i) {
+for (let i = 2; i <= 9; ++i) {
t.save({a: i});
}
-for (i = 0; i < 30; ++i) {
+for (let i = 0; i < 30; ++i) {
t.save({a: 100});
}
t.createIndex({a: 1});
@@ -49,10 +49,10 @@ t.drop();
t.save({a: 1});
t.save({a: 200});
-for (i = 2; i <= 199; ++i) {
+for (let i = 2; i <= 199; ++i) {
t.save({a: i});
}
-for (i = 0; i < 30; ++i) {
+for (let i = 0; i < 30; ++i) {
t.save({a: 2000});
}
t.createIndex({a: 1});
@@ -65,7 +65,7 @@ checkNumSorted(200, t.find({a: {$gte: 0, $lte: 200}, b: null}).sort({a: 1}));
t.drop();
-for (i = 399; i >= 0; --i) {
+for (let i = 399; i >= 0; --i) {
t.save({a: i});
}
t.createIndex({a: 1});
diff --git a/jstests/core/query/sort/sortf.js b/jstests/core/query/sort/sortf.js
index 47c6a27ed9f4e..8efa55c794e3a 100644
--- a/jstests/core/query/sort/sortf.js
+++ b/jstests/core/query/sort/sortf.js
@@ -5,20 +5,20 @@
// Unsorted plan on {a:1}, sorted plan on {b:1}. The unsorted plan exhausts its memory limit before
// the sorted plan is chosen by the query optimizer.
-t = db.jstests_sortf;
+let t = db.jstests_sortf;
t.drop();
t.createIndex({a: 1});
t.createIndex({b: 1});
-for (i = 0; i < 100; ++i) {
+for (let i = 0; i < 100; ++i) {
t.save({a: 0, b: 0});
}
-big = new Array(10 * 1000 * 1000).toString();
-for (i = 0; i < 5; ++i) {
+let big = new Array(10 * 1000 * 1000).toString();
+for (let i = 0; i < 5; ++i) {
t.save({a: 1, b: 1, big: big});
}
assert.eq(5, t.find({a: 1}).sort({b: 1}).itcount());
-t.drop();
\ No newline at end of file
+t.drop();
diff --git a/jstests/core/query/sort/sorti.js b/jstests/core/query/sort/sorti.js
index e30739b4867d6..4816f83be7a8d 100644
--- a/jstests/core/query/sort/sorti.js
+++ b/jstests/core/query/sort/sorti.js
@@ -1,6 +1,6 @@
// Check that a projection is applied after an in memory sort.
-t = db.jstests_sorti;
+let t = db.jstests_sorti;
t.drop();
t.save({a: 1, b: 0});
@@ -9,10 +9,10 @@ t.save({a: 2, b: 2});
t.save({a: 4, b: 3});
function checkBOrder(query) {
- arr = query.toArray();
- order = [];
- for (i in arr) {
- a = arr[i];
+ let arr = query.toArray();
+ let order = [];
+ for (let i in arr) {
+ let a = arr[i];
order.push(a.b);
}
assert.eq([0, 2, 1, 3], order);
diff --git a/jstests/core/query/sort/sortk.js b/jstests/core/query/sort/sortk.js
index 1561d61675e11..adb829d182d7d 100644
--- a/jstests/core/query/sort/sortk.js
+++ b/jstests/core/query/sort/sortk.js
@@ -8,7 +8,7 @@
// requires_scripting,
// ]
-t = db.jstests_sortk;
+let t = db.jstests_sortk;
t.drop();
function resetCollection() {
@@ -25,7 +25,7 @@ resetCollection();
t.createIndex({a: 1, b: 1});
function simpleQuery(extraFields, sort, hint) {
- query = {a: {$in: [1, 2]}};
+ let query = {a: {$in: [1, 2]}};
Object.extend(query, extraFields);
sort = sort || {b: 1};
hint = hint || {a: 1, b: 1};
@@ -67,7 +67,7 @@ assert.eq(0, simpleQuery({}, {a: -1, b: 1}).limit(-1)[0].b);
// Without a hint, multiple cursors are attempted.
assert.eq(0, t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1)[0].b);
-explain = t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1).explain(true);
+let explain = t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1).explain(true);
assert.eq(1, explain.executionStats.nReturned);
// The expected first result now comes from the first interval.
diff --git a/jstests/core/query/type/type1.js b/jstests/core/query/type/type1.js
index 8066de2a5c4b9..49674d834eab4 100644
--- a/jstests/core/query/type/type1.js
+++ b/jstests/core/query/type/type1.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.type1;
+let t = db.type1;
t.drop();
t.save({x: 1.1});
diff --git a/jstests/core/query/type/type2.js b/jstests/core/query/type/type2.js
index d93d313d60f8d..701d99f816e51 100644
--- a/jstests/core/query/type/type2.js
+++ b/jstests/core/query/type/type2.js
@@ -1,6 +1,6 @@
// SERVER-1735 $type:10 matches null value, not missing value.
-t = db.jstests_type2;
+let t = db.jstests_type2;
t.drop();
t.save({a: null});
@@ -16,4 +16,4 @@ function test() {
test();
t.createIndex({a: 1});
-test();
\ No newline at end of file
+test();
diff --git a/jstests/core/query/type/type3.js b/jstests/core/query/type/type3.js
index 8b4858662dd04..14c7697bc6628 100644
--- a/jstests/core/query/type/type3.js
+++ b/jstests/core/query/type/type3.js
@@ -4,7 +4,7 @@
// Check query type bracketing SERVER-3222
-t = db.jstests_type3;
+let t = db.jstests_type3;
t.drop();
t.createIndex({a: 1});
diff --git a/jstests/core/query/type/type_operator_on_missing_values.js b/jstests/core/query/type/type_operator_on_missing_values.js
index 9a67b23b882e1..10130a930fba1 100644
--- a/jstests/core/query/type/type_operator_on_missing_values.js
+++ b/jstests/core/query/type/type_operator_on_missing_values.js
@@ -27,4 +27,4 @@ for (const type of bsonTypes) {
results = coll.find({a: {$not: {$type: type}}}).sort({_id: 1}).toArray();
assert.eq(results, documentList);
}
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/query/unset/unset.js b/jstests/core/query/unset/unset.js
index 14e18229723fd..c8e6ca501ef10 100644
--- a/jstests/core/query/unset/unset.js
+++ b/jstests/core/query/unset/unset.js
@@ -1,10 +1,7 @@
-t = db.unset;
+let t = db.unset;
t.drop();
-orig = {
- _id: 1,
- emb: {}
-};
+let orig = {_id: 1, emb: {}};
t.insert(orig);
t.update({_id: 1}, {$unset: {'emb.a': 1}});
diff --git a/jstests/core/query/unset/unset2.js b/jstests/core/query/unset/unset2.js
index e120ae2b6747d..4d192c5211e81 100644
--- a/jstests/core/query/unset/unset2.js
+++ b/jstests/core/query/unset/unset2.js
@@ -5,7 +5,7 @@
var res;
-t = db.unset2;
+let t = db.unset2;
t.drop();
t.save({a: ["a", "b", "c", "d"]});
diff --git a/jstests/core/query/where/where1.js b/jstests/core/query/where/where1.js
index 1082bb902e8bc..d093388c3d146 100644
--- a/jstests/core/query/where/where1.js
+++ b/jstests/core/query/where/where1.js
@@ -3,7 +3,7 @@
// requires_scripting,
// ]
-t = db.getCollection("where1");
+let t = db.getCollection("where1");
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/where/where2.js b/jstests/core/query/where/where2.js
index 6561c829353c7..12ba7382e3cb9 100644
--- a/jstests/core/query/where/where2.js
+++ b/jstests/core/query/where/where2.js
@@ -3,7 +3,7 @@
// requires_scripting,
// ]
-t = db.getCollection("where2");
+let t = db.getCollection("where2");
t.drop();
t.save({a: 1});
diff --git a/jstests/core/query/where/where3.js b/jstests/core/query/where/where3.js
index ffbe690312c7f..3b48bab56f535 100644
--- a/jstests/core/query/where/where3.js
+++ b/jstests/core/query/where/where3.js
@@ -3,7 +3,7 @@
// requires_scripting
// ]
-t = db.where3;
+let t = db.where3;
t.drop();
t.save({returned_date: 5});
diff --git a/jstests/core/queryable_encryption/basic_crud.js b/jstests/core/queryable_encryption/basic_crud.js
new file mode 100644
index 0000000000000..86d63b3b29638
--- /dev/null
+++ b/jstests/core/queryable_encryption/basic_crud.js
@@ -0,0 +1,152 @@
+/**
+ * Tests basic CRUD operations with queryable encrypted fields.
+ *
+ * @tags: [
+ * no_selinux,
+ * tenant_migration_incompatible,
+ * does_not_support_transactions,
+ * does_not_support_stepdowns,
+ * ]
+ */
+import {
+ assertIsIndexedEncryptedField,
+ EncryptedClient,
+ kSafeContentField
+} from "jstests/fle2/libs/encrypted_client_util.js";
+
+if (!(buildInfo().modules.includes("enterprise"))) {
+ jsTestLog("Skipping test as it requires the enterprise module");
+ quit();
+}
+
+const dbName = "qetestdb";
+const collName = "qetestcoll";
+const initialConn = db.getMongo();
+const localKMS = {
+ key: BinData(
+ 0,
+ "/tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6dr")
+};
+
+// Some tests silently change the DB name to prefix it with a tenant ID, but we
+// need to pass the real DB name for the keyvault when setting up the auto encryption,
+// so that the internal connection for the key vault will target the right DB name.
+const kvDbName = (typeof (initialConn.getDbNameWithTenantPrefix) === "function")
+ ? initialConn.getDbNameWithTenantPrefix(dbName)
+ : dbName;
+jsTestLog("Using key vault db " + kvDbName);
+
+const clientSideFLEOptions = {
+ kmsProviders: {local: localKMS},
+ keyVaultNamespace: kvDbName + ".keystore",
+ schemaMap: {},
+};
+
+db.getSiblingDB(dbName).dropDatabase();
+
+assert(initialConn.setAutoEncryption(clientSideFLEOptions));
+initialConn.toggleAutoEncryption(true);
+
+let encryptedClient = new EncryptedClient(initialConn, dbName);
+assert.commandWorked(encryptedClient.createEncryptionCollection(collName, {
+ encryptedFields: {
+ "fields": [
+ {"path": "first", "bsonType": "string", "queries": {"queryType": "equality"}},
+ ]
+ }
+}));
+
+function runIndexedEqualityEncryptedCRUDTest(client, iterations) {
+ let conn = client.getDB().getMongo();
+ let ecoll = client.getDB()[collName];
+ let values =
+ [["frodo", "baggins"], ["sam", "gamgee"], ["pippin", "took"], ["merry", "brandybuck"]];
+ let count = 0;
+ let escCount = 0;
+ let ecocCount = 0;
+
+ // Do encrypted inserts
+ for (let it = 0; it < iterations; it++) {
+ for (let val of values) {
+ assert.commandWorked(ecoll.insert({_id: count, first: val[0], last: val[1]}));
+ count++;
+ client.assertEncryptedCollectionCounts(collName, count, count, count);
+ }
+ }
+ escCount = count;
+ ecocCount = count;
+
+ // Do finds using unencrypted connection
+ {
+ conn.toggleAutoEncryption(false);
+
+ let rawDocs = ecoll.find().toArray();
+ assert.eq(rawDocs.length, count);
+ for (let rawDoc of rawDocs) {
+ assertIsIndexedEncryptedField(rawDoc.first);
+ assert(rawDoc[kSafeContentField] !== undefined);
+ }
+ conn.toggleAutoEncryption(true);
+ }
+
+ // Do encrypted queries using encrypted connection
+ for (let mod = 0; mod < values.length; mod++) {
+ let docs = ecoll.find({last: values[mod][1]}).toArray();
+
+ for (let doc of docs) {
+ assert.eq(doc._id % values.length, mod);
+ assert.eq(doc.first, values[mod][0]);
+ assert(doc[kSafeContentField] !== undefined);
+ }
+ }
+
+ // Do updates on encrypted fields
+ for (let it = 0; it < iterations; it++) {
+ let res = assert.commandWorked(ecoll.updateOne(
+ {$and: [{last: "baggins"}, {first: "frodo"}]}, {$set: {first: "bilbo"}}));
+ assert.eq(res.matchedCount, 1);
+ assert.eq(res.modifiedCount, 1);
+ escCount++;
+ ecocCount++;
+ client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount);
+
+ res = assert.commandWorked(
+ ecoll.replaceOne({last: "took"}, {first: "paladin", last: "took"}));
+ assert.eq(res.matchedCount, 1);
+ assert.eq(res.modifiedCount, 1);
+ escCount++;
+ ecocCount++;
+ client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount);
+ }
+
+ // Do findAndModifies
+ for (let it = 0; it < iterations; it++) {
+ let res = assert.commandWorked(ecoll.runCommand({
+ findAndModify: ecoll.getName(),
+ query: {$and: [{last: "gamgee"}, {first: "sam"}]},
+ update: {$set: {first: "rosie"}},
+ }));
+ print(tojson(res));
+ assert.eq(res.value.first, "sam");
+ assert(res.value[kSafeContentField] !== undefined);
+ escCount++;
+ ecocCount++;
+ client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount);
+ }
+
+ // Do deletes
+ for (let it = 0; it < iterations; it++) {
+ let res = assert.commandWorked(
+ ecoll.deleteOne({last: "brandybuck"}, {writeConcern: {w: "majority"}}));
+ assert.eq(res.deletedCount, 1);
+ count--;
+ client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount);
+ }
+ assert.eq(ecoll.find({last: "brandybuck"}).count(), 0);
+}
+
+// Test CRUD on indexed equality encrypted fields
+runIndexedEqualityEncryptedCRUDTest(encryptedClient, 10);
+
+encryptedClient = undefined;
+initialConn.unsetAutoEncryption();
diff --git a/jstests/core/record_store_count.js b/jstests/core/record_store_count.js
index da64870dfe164..222f3e82a47d3 100644
--- a/jstests/core/record_store_count.js
+++ b/jstests/core/record_store_count.js
@@ -6,12 +6,9 @@
* ]
*/
-load("jstests/libs/analyze_plan.js"); // For 'planHasStage'.
+import {planHasStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded.
-(function() {
-"use strict";
-
var coll = db.record_store_count;
coll.drop();
@@ -63,7 +60,7 @@ if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
// In an unsharded collection we can use the COUNT_SCAN stage.
testExplainAndExpectStage(
{expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}});
- return;
+ quit();
}
// The remainder of the test is only relevant for sharded clusters.
@@ -87,4 +84,3 @@ testExplainAndExpectStage({
unexpectedStages: ["FETCH"],
hintIndex: kNewIndexSpec
});
-})();
diff --git a/jstests/core/resume_query.js b/jstests/core/resume_query.js
index 711c19980a16e..18fa29f4ce187 100644
--- a/jstests/core/resume_query.js
+++ b/jstests/core/resume_query.js
@@ -11,66 +11,136 @@
* ]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
const testName = TestData.testName;
-const testDb = db.getSiblingDB(testName);
-assert.commandWorked(testDb.dropDatabase());
-
-jsTestLog("Setting up the data.");
-const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}];
-assert.commandWorked(testDb.test.insert(testData));
-
-jsTestLog("Running the initial query.");
-let res = assert.commandWorked(testDb.runCommand(
- {find: "test", hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
-assert.eq(1, res.cursor.firstBatch.length);
-assert.contains(res.cursor.firstBatch[0], testData);
-let queryData = res.cursor.firstBatch;
-assert.hasFields(res.cursor, ["postBatchResumeToken"]);
-let resumeToken = res.cursor.postBatchResumeToken;
-
-// Kill the cursor before attempting to resume.
-assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
-
-jsTestLog("Running the second query after killing the cursor.");
-res = assert.commandWorked(testDb.runCommand({
- find: "test",
- hint: {$natural: 1},
- batchSize: 1,
- $_requestResumeToken: true,
- $_resumeAfter: resumeToken
-}));
-assert.eq(1, res.cursor.firstBatch.length);
-// The return value should not be the same as the one before.
-assert.neq(queryData[0], res.cursor.firstBatch[0]);
-assert.contains(res.cursor.firstBatch[0], testData);
-queryData.push(res.cursor.firstBatch[0]);
-let cursorId = res.cursor.id;
-
-jsTestLog("Running getMore.");
-res =
- assert.commandWorked(testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1}));
-queryData.push(res.cursor.nextBatch[0]);
-assert.hasFields(res.cursor, ["postBatchResumeToken"]);
-resumeToken = res.cursor.postBatchResumeToken;
-
-// Kill the cursor before attempting to resume.
-assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
-
-jsTestLog("Testing resume from getMore");
-res = assert.commandWorked(testDb.runCommand({
- find: "test",
- hint: {$natural: 1},
- batchSize: 10,
- $_requestResumeToken: true,
- $_resumeAfter: resumeToken
-}));
-assert.eq(1, res.cursor.firstBatch.length);
-// This should have exhausted the collection.
-assert.eq(0, res.cursor.id);
-queryData.push(res.cursor.firstBatch[0]);
-
-assert.sameMembers(testData, queryData);
-})();
+const testFindCmd = function() {
+ const testDb = db.getSiblingDB(testName);
+ assert.commandWorked(testDb.dropDatabase());
+
+ jsTestLog("[Find] Setting up the data.");
+ const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}];
+ assert.commandWorked(testDb.test.insert(testData));
+ jsTestLog("[Find] Running the initial query.");
+ let res = assert.commandWorked(testDb.runCommand(
+ {find: "test", hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ let queryData = res.cursor.firstBatch;
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ let resumeToken = res.cursor.postBatchResumeToken;
+
+ // Kill the cursor before attempting to resume.
+ assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
+
+ jsTestLog("[Find] Running the second query after killing the cursor.");
+ res = assert.commandWorked(testDb.runCommand({
+ find: "test",
+ hint: {$natural: 1},
+ batchSize: 1,
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ // The return value should not be the same as the one before.
+ assert.neq(queryData[0], res.cursor.firstBatch[0]);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ queryData.push(res.cursor.firstBatch[0]);
+ let cursorId = res.cursor.id;
+
+ jsTestLog("[Find] Running getMore.");
+ res = assert.commandWorked(
+ testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1}));
+ queryData.push(res.cursor.nextBatch[0]);
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ resumeToken = res.cursor.postBatchResumeToken;
+
+ // Kill the cursor before attempting to resume.
+ assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
+
+ jsTestLog("[Find] Testing resume from getMore");
+ res = assert.commandWorked(testDb.runCommand({
+ find: "test",
+ hint: {$natural: 1},
+ batchSize: 10,
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ // This should have exhausted the collection.
+ assert.eq(0, res.cursor.id);
+ queryData.push(res.cursor.firstBatch[0]);
+
+ assert.sameMembers(testData, queryData);
+};
+
+const testAggregateCmd = function() {
+ if (!FeatureFlagUtil.isEnabled(db, "ReshardingImprovements")) {
+ jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled.");
+ return;
+ }
+ const testDb = db.getSiblingDB(testName);
+ assert.commandWorked(testDb.dropDatabase());
+
+ jsTestLog("[Aggregate] Setting up the data.");
+ const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}];
+ assert.commandWorked(testDb.test.insert(testData));
+ jsTestLog("[Aggregate] Running the initial query.");
+ let res = assert.commandWorked(testDb.runCommand({
+ aggregate: "test",
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 1},
+ $_requestResumeToken: true
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ let queryData = res.cursor.firstBatch;
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ let resumeToken = res.cursor.postBatchResumeToken;
+
+ assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
+
+ jsTestLog("[Aggregate] Running the second query after killing the cursor.");
+ res = assert.commandWorked(testDb.runCommand({
+ aggregate: "test",
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 1},
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.neq(queryData[0], res.cursor.firstBatch[0]);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ queryData.push(res.cursor.firstBatch[0]);
+ let cursorId = res.cursor.id;
+
+ jsTestLog("[Aggregate] Running getMore.");
+ res = assert.commandWorked(
+ testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1}));
+ queryData.push(res.cursor.nextBatch[0]);
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ resumeToken = res.cursor.postBatchResumeToken;
+
+ assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]}));
+
+ jsTestLog("[Aggregate] Testing resume from getMore");
+ res = assert.commandWorked(testDb.runCommand({
+ aggregate: "test",
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 10},
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.eq(0, res.cursor.id);
+ queryData.push(res.cursor.firstBatch[0]);
+
+ assert.sameMembers(testData, queryData);
+};
+
+testFindCmd();
+testAggregateCmd();
\ No newline at end of file
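
Both helpers added above follow the same resumable-scan handshake: a natural-order `find` with `$_requestResumeToken` returns a `postBatchResumeToken`, and passing that token back via `$_resumeAfter` continues the scan after that record, even on a brand-new cursor. A condensed sketch of the find-command flow (the collection name is made up):

```js
const c = db.resume_token_sketch;  // hypothetical collection
c.drop();
assert.commandWorked(c.insert([{_id: 0}, {_id: 1}, {_id: 2}]));

// First batch: one document plus a token recording where the scan stopped.
let res = assert.commandWorked(db.runCommand(
    {find: c.getName(), hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
const firstDoc = res.cursor.firstBatch[0];
const token = res.cursor.postBatchResumeToken;
assert.commandWorked(db.runCommand({killCursors: c.getName(), cursors: [res.cursor.id]}));

// Resume on a fresh cursor: the batch starts after the record named by the
// token, so the first document is not returned again.
res = assert.commandWorked(db.runCommand({
    find: c.getName(),
    hint: {$natural: 1},
    batchSize: 1,
    $_requestResumeToken: true,
    $_resumeAfter: token
}));
assert.neq(firstDoc, res.cursor.firstBatch[0]);
```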
diff --git a/jstests/core/resume_query_from_non_existent_record.js b/jstests/core/resume_query_from_non_existent_record.js
index 8dba152a97926..62bc8481bd56f 100644
--- a/jstests/core/resume_query_from_non_existent_record.js
+++ b/jstests/core/resume_query_from_non_existent_record.js
@@ -15,59 +15,121 @@
* ]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const collName = "resume_query_from_non_existent_record";
const coll = db[collName];
-coll.drop();
-
-const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}];
-assert.commandWorked(coll.insert(testData));
-
-// Run the initial query and request to return a resume token. We're interested only in a single
-// document, so 'batchSize' is set to 1.
-let res = assert.commandWorked(
- db.runCommand({find: collName, hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
-assert.eq(1, res.cursor.firstBatch.length);
-assert.contains(res.cursor.firstBatch[0], testData);
-const savedData = res.cursor.firstBatch;
-
-// Make sure the query returned a resume token which will be used to resume the query from.
-assert.hasFields(res.cursor, ["postBatchResumeToken"]);
-const resumeToken = res.cursor.postBatchResumeToken;
-
-// Kill the cursor before attempting to resume.
-assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
-
-// Try to resume the query from the saved resume token.
-res = assert.commandWorked(db.runCommand({
- find: collName,
- hint: {$natural: 1},
- batchSize: 1,
- $_requestResumeToken: true,
- $_resumeAfter: resumeToken
-}));
-assert.eq(1, res.cursor.firstBatch.length);
-assert.contains(res.cursor.firstBatch[0], testData);
-assert.neq(savedData[0], res.cursor.firstBatch[0]);
-
-// Kill the cursor before attempting to resume.
-assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
-
-// Delete a document which corresponds to the saved resume token, so that we can guarantee it does
-// not exist.
-assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true}));
-
-// Try to resume the query from the same token and check that it fails to position the cursor to
-// the record specified in the resume token.
-assert.commandFailedWithCode(db.runCommand({
- find: collName,
- hint: {$natural: 1},
- batchSize: 1,
- $_requestResumeToken: true,
- $_resumeAfter: resumeToken
-}),
- ErrorCodes.KeyNotFound);
-})();
+const testFindCmd = function() {
+ coll.drop();
+
+ const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}];
+ assert.commandWorked(coll.insert(testData));
+
+ jsTestLog("[Find] request a resumeToken then use it to resume.");
+ // Run the initial query and request to return a resume token. We're interested only in a single
+ // document, so 'batchSize' is set to 1.
+ let res = assert.commandWorked(db.runCommand(
+ {find: collName, hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ const savedData = res.cursor.firstBatch;
+
+ // Make sure the query returned a resume token which will be used to resume the query from.
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ const resumeToken = res.cursor.postBatchResumeToken;
+
+ // Kill the cursor before attempting to resume.
+ assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+
+ // Try to resume the query from the saved resume token.
+ res = assert.commandWorked(db.runCommand({
+ find: collName,
+ hint: {$natural: 1},
+ batchSize: 1,
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ assert.neq(savedData[0], res.cursor.firstBatch[0]);
+
+ // Kill the cursor before attempting to resume.
+ assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+
+ jsTestLog(
+ "[Find] Delete the document which corresponds to the saved resume token, then resumeAfter should fail.");
+ // Delete a document which corresponds to the saved resume token, so that we can guarantee it
+ // does not exist.
+ assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true}));
+
+ // Try to resume the query from the same token and check that it fails to position the cursor to
+ // the record specified in the resume token.
+ assert.commandFailedWithCode(db.runCommand({
+ find: collName,
+ hint: {$natural: 1},
+ batchSize: 1,
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }),
+ ErrorCodes.KeyNotFound);
+};
+
+const testAggregateCmd = function() {
+ if (!FeatureFlagUtil.isEnabled(db, "ReshardingImprovements")) {
+ jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled.");
+ return;
+ }
+ coll.drop();
+
+ const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}];
+ assert.commandWorked(coll.insert(testData));
+
+ jsTestLog("[Aggregate] request a resumeToken then use it to resume.");
+ let res = assert.commandWorked(db.runCommand({
+ aggregate: collName,
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 1},
+ $_requestResumeToken: true
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ const savedData = res.cursor.firstBatch;
+
+ assert.hasFields(res.cursor, ["postBatchResumeToken"]);
+ const resumeToken = res.cursor.postBatchResumeToken;
+
+ assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+
+ res = assert.commandWorked(db.runCommand({
+ aggregate: collName,
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 1},
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }));
+ assert.eq(1, res.cursor.firstBatch.length);
+ assert.contains(res.cursor.firstBatch[0], testData);
+ assert.neq(savedData[0], res.cursor.firstBatch[0]);
+
+ assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+
+ jsTestLog(
+ "[Aggregate] Delete the document which corresponds to the saved resume token, then resumeAfter should fail.");
+ assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true}));
+
+ assert.commandFailedWithCode(db.runCommand({
+ aggregate: collName,
+ pipeline: [],
+ hint: {$natural: 1},
+ cursor: {batchSize: 1},
+ $_requestResumeToken: true,
+ $_resumeAfter: resumeToken
+ }),
+ ErrorCodes.KeyNotFound);
+};
+
+testFindCmd();
+testAggregateCmd();
\ No newline at end of file
diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js
index 58c48360597a3..c23ff7800c0d8 100644
--- a/jstests/core/return_key.js
+++ b/jstests/core/return_key.js
@@ -8,10 +8,7 @@
/**
* Tests for returnKey.
*/
-load("jstests/libs/analyze_plan.js");
-
-(function() {
-'use strict';
+import {isIndexOnly} from "jstests/libs/analyze_plan.js";
var results;
var explain;
@@ -84,4 +81,3 @@ assert.eq(results, [{a: 3, c: [1], d: [1]}, {a: 2, c: [2], d: [2]}, {a: 1, c: [3
results =
coll.find({}, {"c.d": {$meta: "sortKey"}}).hint({a: 1}).sort({b: 1}).returnKey().toArray();
assert.eq(results, [{a: 3, c: {d: [1]}}, {a: 2, c: {d: [2]}}, {a: 1, c: {d: [3]}}]);
-})();
diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js
index 9fd3d28ce5551..05e93f7a70de7 100644
--- a/jstests/core/role_management_helpers.js
+++ b/jstests/core/role_management_helpers.js
@@ -13,7 +13,7 @@
// It is not a comprehensive test of the functionality of the role manipulation commands
function assertHasRole(rolesArray, roleName, roleDB) {
- for (i in rolesArray) {
+ for (let i in rolesArray) {
var curRole = rolesArray[i];
if (curRole.role == roleName && curRole.db == roleDB) {
return;
@@ -23,7 +23,7 @@ function assertHasRole(rolesArray, roleName, roleDB) {
}
function assertHasPrivilege(privilegeArray, privilege) {
- for (i in privilegeArray) {
+ for (let i in privilegeArray) {
var curPriv = privilegeArray[i];
if (curPriv.resource.cluster == privilege.resource.cluster &&
curPriv.resource.anyResource == privilege.resource.anyResource &&
@@ -31,7 +31,7 @@ function assertHasPrivilege(privilegeArray, privilege) {
curPriv.resource.collection == privilege.resource.collection) {
// Same resource
assert.eq(curPriv.actions.length, privilege.actions.length);
- for (k in curPriv.actions) {
+ for (let k in curPriv.actions) {
assert.eq(curPriv.actions[k], privilege.actions[k]);
}
return;
diff --git a/jstests/core/sbe/from_plan_cache_flag.js b/jstests/core/sbe/from_plan_cache_flag.js
index 87cb795e0acb7..a7612e8049a74 100644
--- a/jstests/core/sbe/from_plan_cache_flag.js
+++ b/jstests/core/sbe/from_plan_cache_flag.js
@@ -8,16 +8,12 @@
// # TODO SERVER-67607: Test plan cache with CQF enabled.
// cqf_incompatible,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
if (!checkSBEEnabled(db)) {
jsTest.log("Skip running the test because SBE is not enabled");
- return;
+ quit();
}
const testDB = db.getSiblingDB("from_plan_cache_flag");
assert.commandWorked(testDB.dropDatabase());
@@ -42,4 +38,3 @@ assert.eq(!!profileObj.fromPlanCache, true, profileObj);
coll.aggregate([{$match: {a: 3}}], {comment}).toArray();
profileObj = getLatestProfilerEntry(testDB, {"command.comment": comment});
assert.eq(!!profileObj.fromPlanCache, true, profileObj);
-}());
diff --git a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
index f4a90d42dc159..3f27b34780e54 100644
--- a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+++ b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
@@ -12,18 +12,15 @@
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromShape, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTest.log("Skip running the test because SBE is not enabled");
- return;
+ quit();
}
function getPlanCacheEntries(query, collection, db) {
@@ -59,4 +56,3 @@ assert.eq(true, planCacheEntries[0].isPinned, planCacheEntries);
assert.eq(true, planCacheEntries[0].isActive, planCacheEntries);
// Works is always 0 for pinned plan cache entries.
assert.eq(0, planCacheEntries[0].works, planCacheEntries);
-}());
diff --git a/jstests/core/sbe/sbe_explain_rejected_plans.js b/jstests/core/sbe/sbe_explain_rejected_plans.js
index a6ae25751868d..0dc66d494f030 100644
--- a/jstests/core/sbe/sbe_explain_rejected_plans.js
+++ b/jstests/core/sbe/sbe_explain_rejected_plans.js
@@ -5,17 +5,20 @@
* requires_fcv_63,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {
+ getExecutionStages,
+ getPlanStages,
+ getRejectedPlan,
+ getRejectedPlans,
+ getWinningPlan,
+} from "jstests/libs/analyze_plan.js";
load("jstests/libs/collection_drop_recreate.js");
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSBEEnabled = checkSBEEnabled(db);
if (!isSBEEnabled) {
jsTestLog("Skipping test because SBE is disabled");
- return;
+ quit();
}
const coll = assertDropAndRecreateCollection(db, "sbe_explain_rejected_plans");
@@ -66,4 +69,3 @@ for (let rejectedPlan of getRejectedPlans(explain)) {
rejectedPlan.slotBasedPlan.stages.includes("@\"b_1\""),
explain);
}
-})();
diff --git a/jstests/core/sbe/sbe_ixscan_explain.js b/jstests/core/sbe/sbe_ixscan_explain.js
index d79b8a209fc57..21062a5848ae0 100644
--- a/jstests/core/sbe/sbe_ixscan_explain.js
+++ b/jstests/core/sbe/sbe_ixscan_explain.js
@@ -7,16 +7,13 @@
// requires_fcv_63,
// ]
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getPlanStages
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanStages, getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSBEEnabled = checkSBEEnabled(db);
if (!isSBEEnabled) {
jsTestLog("Skipping test because SBE is disabled");
- return;
+ quit();
}
function assertStageContainsIndexName(stage) {
@@ -45,4 +42,3 @@ assert(ixscanStages.length !== 0);
for (let ixscanStage of ixscanStages) {
assertStageContainsIndexName(ixscanStage);
}
-}());
diff --git a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
index c1aaea782be6b..7f0ac77d90f5e 100644
--- a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
+++ b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
@@ -18,17 +18,14 @@
* requires_scripting,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js");
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// This test is specifically verifying the behavior of the SBE plan cache, which is only enabled
// when SBE is enabled.
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
- return;
+ quit();
}
const coll = db.sbe_plan_cache_autoparameterize_collscan;
@@ -432,4 +429,3 @@ runTest({query: {a: {$type: ["string", "regex"]}}, projection: {_id: 1}},
{query: {a: {$type: ["string", "array"]}}, projection: {_id: 1}},
[{_id: 5}, {_id: 6}, {_id: 8}, {_id: 11}, {_id: 12}, {_id: 13}, {_id: 15}],
false);
-}());
diff --git a/jstests/core/server1470.js b/jstests/core/server1470.js
index c3c7d47aaeb7c..bbca047d8ee7c 100644
--- a/jstests/core/server1470.js
+++ b/jstests/core/server1470.js
@@ -3,15 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes]
-t = db.server1470;
+let t = db.server1470;
t.drop();
-q = {
- "name": "first",
- "pic": {"$ref": "foo", "$id": ObjectId("4c48d04cd33a5a92628c9af6")}
-};
+let q = {"name": "first", "pic": {"$ref": "foo", "$id": ObjectId("4c48d04cd33a5a92628c9af6")}};
t.update(q, {$set: {x: 1}}, true, true);
-ref = t.findOne().pic;
+let ref = t.findOne().pic;
assert.eq("object", typeof (ref));
assert.eq(q.pic["$ref"], ref["$ref"]);
assert.eq(q.pic["$id"], ref["$id"]);
diff --git a/jstests/core/server22053.js b/jstests/core/server22053.js
index d803c732b869c..5c93f60faaece 100644
--- a/jstests/core/server22053.js
+++ b/jstests/core/server22053.js
@@ -16,4 +16,4 @@ assert.eq(3, doc['mys'][2]);
assert.eq(undefined, doc['mys'][3]);
assert.eq(undefined, doc['mys'][4]);
assert.eq(6, doc['mys'][5]);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/server5346.js b/jstests/core/server5346.js
index 18f2f019e5e1f..f9ad685296104 100644
--- a/jstests/core/server5346.js
+++ b/jstests/core/server5346.js
@@ -1,11 +1,8 @@
-t = db.server5346;
+let t = db.server5346;
t.drop();
-x = {
- _id: 1,
- versions: {}
-};
+let x = {_id: 1, versions: {}};
t.insert(x);
t.update({_id: 1}, {$inc: {"versions.2_01": 1}});
diff --git a/jstests/core/server7756.js b/jstests/core/server7756.js
index 844c3a40d4d40..574e5587845f8 100644
--- a/jstests/core/server7756.js
+++ b/jstests/core/server7756.js
@@ -1,5 +1,5 @@
-t = db.server7756;
+let t = db.server7756;
t.drop();
t.save({a: [{1: 'x'}, 'y']});
diff --git a/jstests/core/server9385.js b/jstests/core/server9385.js
index 4eb11076139d2..3e8cd4c96bd44 100644
--- a/jstests/core/server9385.js
+++ b/jstests/core/server9385.js
@@ -2,11 +2,11 @@
//
// @tags: [requires_fastcount]
-t = db.server9385;
+let t = db.server9385;
t.drop();
t.insert({_id: 1, x: 1});
-x = t.findOne();
+let x = t.findOne();
x._id = 2;
t.save(x);
diff --git a/jstests/core/shell/autocomplete.js b/jstests/core/shell/autocomplete.js
index 6b8fb6ffa78ff..3ffa0db77cf77 100644
--- a/jstests/core/shell/autocomplete.js
+++ b/jstests/core/shell/autocomplete.js
@@ -1,6 +1,11 @@
/**
* Validate auto complete works for various javascript types implemented by C++.
+ * @tags: [
+ * # TODO SERVER-77024 enable on sharded passthrough suites when the orphans hook is supported
+ * assumes_unsharded_collection,
+ * ]
*/
+
(function() {
'use strict';
diff --git a/jstests/core/shell/collection_save.js b/jstests/core/shell/collection_save.js
index d375aa9f31eaf..fb3e53b756d09 100644
--- a/jstests/core/shell/collection_save.js
+++ b/jstests/core/shell/collection_save.js
@@ -32,4 +32,4 @@ assert.throws(() => coll.save("The answer to life, the universe and everything")
assert.throws(() => coll.save([{"fruit": "mango"}, {"fruit": "orange"}]),
[],
"saving an array must throw an error");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/core/shell/shell1.js b/jstests/core/shell/shell1.js
index 4fc4c3a1c15e2..cbe52f144019d 100644
--- a/jstests/core/shell/shell1.js
+++ b/jstests/core/shell/shell1.js
@@ -1,4 +1,4 @@
-x = 1;
+let x = 1;
shellHelper("show", "tables;");
shellHelper("show", "tables");
diff --git a/jstests/core/shell/shellkillop.js b/jstests/core/shell/shellkillop.js
index 0ac2ad681ed6f..b6c3746e05225 100644
--- a/jstests/core/shell/shellkillop.js
+++ b/jstests/core/shell/shellkillop.js
@@ -1,15 +1,15 @@
-baseName = "jstests_shellkillop";
+let baseName = "jstests_shellkillop";
// 'retry' should be set to true in contexts where an exception should cause the test to be retried
// rather than to fail.
-retry = false;
+let retry = false;
function testShellAutokillop() {
if (true) { // toggle to disable test
db[baseName].drop();
print("shellkillop.js insert data");
- for (i = 0; i < 100000; ++i) {
+ for (let i = 0; i < 100000; ++i) {
db[baseName].insert({i: 1});
}
assert.eq(100000, db[baseName].count());
@@ -19,7 +19,7 @@ function testShellAutokillop() {
var evalStr = "print('SKO subtask started'); db." + baseName +
".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
print("shellkillop.js evalStr:" + evalStr);
- spawn = startMongoProgramNoConnect(
+ let spawn = startMongoProgramNoConnect(
"mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
sleep(100);
@@ -35,7 +35,7 @@ function testShellAutokillop() {
print("count abcdefghijkl:" + db[baseName].find({i: 'abcdefghijkl'}).count());
var inprog = db.currentOp().inprog;
- for (i in inprog) {
+ for (let i in inprog) {
if (inprog[i].ns == "test." + baseName)
throw Error("shellkillop.js op is still running: " + tojson(inprog[i]));
}
diff --git a/jstests/core/show_record_id.js b/jstests/core/show_record_id.js
index 3af12d74d61f5..6bf75a42d372d 100644
--- a/jstests/core/show_record_id.js
+++ b/jstests/core/show_record_id.js
@@ -8,8 +8,8 @@ var t = db.show_record_id;
t.drop();
function checkResults(arr) {
- for (i in arr) {
- a = arr[i];
+ for (let i in arr) {
+ let a = arr[i];
assert(a['$recordId']);
}
}
diff --git a/jstests/core/single_field_hashed_index.js b/jstests/core/single_field_hashed_index.js
index a05400c4d581f..6f64a8f0dc3ad 100644
--- a/jstests/core/single_field_hashed_index.js
+++ b/jstests/core/single_field_hashed_index.js
@@ -7,9 +7,7 @@
* requires_fastcount,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js"); // For isIxscan().
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";
const t = db.single_field_hashed_index;
t.drop();
@@ -112,5 +110,4 @@ assert.commandFailedWithCode(t.insert({field1: {field2: {0: []}}}), 16766);
assert.commandFailedWithCode(t.insert({field1: [{field2: {0: []}}]}), 16766);
assert.commandFailedWithCode(t.insert({field1: {field2: {0: {field4: []}}}}), 16766);
assert.commandWorked(t.insert({field1: {field2: {0: {otherField: []}}}}));
-assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}}));
-})();
+assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}}));
\ No newline at end of file
diff --git a/jstests/core/sparse_index_supports_ne_null.js b/jstests/core/sparse_index_supports_ne_null.js
index 4ad53e72996de..a8c115dba2a19 100644
--- a/jstests/core/sparse_index_supports_ne_null.js
+++ b/jstests/core/sparse_index_supports_ne_null.js
@@ -9,9 +9,7 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.sparse_index_supports_ne_null;
coll.drop();
@@ -198,5 +196,4 @@ checkQuery({
nResultsExpected: 0,
indexKeyPattern: keyPattern
});
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js
index 5c3c1b2e559c4..54d8112ab97ae 100644
--- a/jstests/core/splitvector.js
+++ b/jstests/core/splitvector.js
@@ -26,14 +26,14 @@
// e.g. 20000
// @param maxChunkSize is in MBs.
//
-assertChunkSizes = function(splitVec, numDocs, maxChunkSize, msg) {
+let assertChunkSizes = function(splitVec, numDocs, maxChunkSize, msg) {
splitVec = [{x: -1}].concat(splitVec);
splitVec.push({x: numDocs + 1});
- for (i = 0; i < splitVec.length - 1; i++) {
- min = splitVec[i];
- max = splitVec[i + 1];
+ for (let i = 0; i < splitVec.length - 1; i++) {
+ let min = splitVec[i];
+ let max = splitVec[i + 1];
var avgObjSize = db.jstests_splitvector.stats().avgObjSize;
- size = db.runCommand({datasize: "test.jstests_splitvector", min: min, max: max}).size;
+ let size = db.runCommand({datasize: "test.jstests_splitvector", min: min, max: max}).size;
// It is okay for the last chunk to be smaller. A collection's size does not
// need to be exactly a multiple of maxChunkSize.
@@ -90,7 +90,7 @@ let bulkInsertDocsFixedX = function(coll, numDocs, filler, xVal) {
// -------------------------
// TESTS START HERE
// -------------------------
-f = db.jstests_splitvector;
+let f = db.jstests_splitvector;
resetCollection();
// -------------------------
@@ -125,29 +125,30 @@ assert.eq(
resetCollection();
f.createIndex({x: 1});
+let filler;
var case4 = function() {
// Get baseline document size
filler = "";
while (filler.length < 500)
filler += "a";
f.save({x: 0, y: filler});
- docSize = db.runCommand({datasize: "test.jstests_splitvector"}).size;
+ let docSize = db.runCommand({datasize: "test.jstests_splitvector"}).size;
assert.gt(docSize, 500, "4a");
// Fill collection and get split vector for 1MB maxChunkSize
let numDocs = 4500;
bulkInsertDocs(f, numDocs - 1, filler); // 1 document was already inserted.
- res = db.runCommand(
+ let res = db.runCommand(
{splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
// splitVector aims at getting half-full chunks after split
- factor = 0.5;
+ let factor = 0.5;
assert.eq(true, res.ok, "4b");
assert.close(
numDocs * docSize / ((1 << 20) * factor), res.splitKeys.length, "num split keys", -1);
assertChunkSizes(res.splitKeys, numDocs, (1 << 20) * factor, "4d");
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
@@ -162,7 +163,7 @@ f.createIndex({x: 1});
var case5 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
bulkInsertDocs(f, 4499, filler);
- res = db.runCommand({
+ let res = db.runCommand({
splitVector: "test.jstests_splitvector",
keyPattern: {x: 1},
maxChunkSize: 1,
@@ -171,7 +172,7 @@ var case5 = function() {
assert.eq(true, res.ok, "5a");
assert.eq(1, res.splitKeys.length, "5b");
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
@@ -186,7 +187,7 @@ f.createIndex({x: 1});
var case6 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
bulkInsertDocs(f, 1999, filler);
- res = db.runCommand({
+ let res = db.runCommand({
splitVector: "test.jstests_splitvector",
keyPattern: {x: 1},
maxChunkSize: 1,
@@ -195,7 +196,7 @@ var case6 = function() {
assert.eq(true, res.ok, "6a");
assert.eq(3, res.splitKeys.length, "6b");
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
@@ -212,12 +213,12 @@ var case7 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
bulkInsertDocsFixedX(f, 2099, filler, 1);
bulkInsertDocsFixedX(f, 9, filler, 2);
- res = db.runCommand(
+ let res = db.runCommand(
{splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
assert.eq(true, res.ok, "7a");
assert.eq(2, res.splitKeys[0].x, "7b");
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
@@ -234,14 +235,14 @@ var case8 = function() {
bulkInsertDocsFixedX(f, 9, filler, 1);
bulkInsertDocsFixedX(f, 2099, filler, 2);
bulkInsertDocsFixedX(f, 9, filler, 3);
- res = db.runCommand(
+ let res = db.runCommand(
{splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
assert.eq(true, res.ok, "8a");
assert.eq(2, res.splitKeys.length, "8b");
assert.eq(2, res.splitKeys[0].x, "8c");
assert.eq(3, res.splitKeys[1].x, "8d");
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
@@ -262,7 +263,7 @@ var case9 = function() {
assert.eq(3, f.count());
print(f.getFullName());
- res = db.runCommand({splitVector: f.getFullName(), keyPattern: {x: 1}, force: true});
+ let res = db.runCommand({splitVector: f.getFullName(), keyPattern: {x: 1}, force: true});
assert.eq(true, res.ok, "9a");
assert.eq(1, res.splitKeys.length, "9b");
@@ -275,7 +276,7 @@ var case9 = function() {
assert.eq(true, res.ok, "9a: " + tojson(res));
assert.eq(1, res.splitKeys.length, "9b: " + tojson(res));
assert.eq(2, res.splitKeys[0].x, "9c: " + tojson(res));
- for (i = 0; i < res.splitKeys.length; i++) {
+ for (let i = 0; i < res.splitKeys.length; i++) {
assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
}
diff --git a/jstests/core/stage_debug/stages_and_hash.js b/jstests/core/stage_debug/stages_and_hash.js
index 4e75dace007df..741a6778ad6a1 100644
--- a/jstests/core/stage_debug/stages_and_hash.js
+++ b/jstests/core/stage_debug/stages_and_hash.js
@@ -6,7 +6,7 @@
// no_selinux,
// ]
-t = db.stages_and_hashed;
+let t = db.stages_and_hashed;
t.drop();
var collname = "stages_and_hashed";
@@ -21,7 +21,7 @@ t.createIndex({bar: 1});
t.createIndex({baz: 1});
// Scan foo <= 20
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
name: "stages_and_hashed",
@@ -36,7 +36,7 @@ ixscan1 = {
};
// Scan bar >= 40
-ixscan2 = {
+let ixscan2 = {
ixscan: {
args: {
name: "stages_and_hashed",
@@ -52,15 +52,13 @@ ixscan2 = {
// bar = 50 - foo
// Intersection is (foo=0 bar=50, foo=1 bar=49, ..., foo=10 bar=40)
-andix1ix2 = {
- andHash: {args: {nodes: [ixscan1, ixscan2]}}
-};
-res = db.runCommand({stageDebug: {plan: andix1ix2, collection: collname}});
+let andix1ix2 = {andHash: {args: {nodes: [ixscan1, ixscan2]}}};
+let res = db.runCommand({stageDebug: {plan: andix1ix2, collection: collname}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 11);
// Filter predicates from 2 indices. Tests that we union the idx info.
-andix1ix2filter = {
+let andix1ix2filter = {
fetch: {
filter: {bar: {$in: [45, 46, 48]}, foo: {$in: [4, 5, 6]}},
args: {node: {andHash: {args: {nodes: [ixscan1, ixscan2]}}}}
diff --git a/jstests/core/stage_debug/stages_and_sorted.js b/jstests/core/stage_debug/stages_and_sorted.js
index 34826baaa3234..55c5abd8676db 100644
--- a/jstests/core/stage_debug/stages_and_sorted.js
+++ b/jstests/core/stage_debug/stages_and_sorted.js
@@ -6,7 +6,7 @@
// no_selinux,
// ]
-t = db.stages_and_sorted;
+let t = db.stages_and_sorted;
t.drop();
var collname = "stages_and_sorted";
@@ -31,7 +31,7 @@ t.createIndex({bar: 1});
t.createIndex({baz: 1});
// Scan foo == 1
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
name: "stages_and_sorted",
@@ -46,7 +46,7 @@ ixscan1 = {
};
// Scan bar == 1
-ixscan2 = {
+let ixscan2 = {
ixscan: {
args: {
name: "stages_and_sorted",
@@ -61,7 +61,7 @@ ixscan2 = {
};
// Scan baz == 12
-ixscan3 = {
+let ixscan3 = {
ixscan: {
args: {
name: "stages_and_sorted",
@@ -76,18 +76,14 @@ ixscan3 = {
};
// Intersect foo==1 with bar==1 with baz==12.
-andix1ix2 = {
- andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}
-};
-res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2}});
+let andix1ix2 = {andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}};
+let res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2}});
printjson(res);
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
// Might as well make sure that hashed does the same thing.
-andix1ix2hash = {
- andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}
-};
+let andix1ix2hash = {andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}};
res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2hash}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
diff --git a/jstests/core/stage_debug/stages_collection_scan.js b/jstests/core/stage_debug/stages_collection_scan.js
index 08b3eb2eeb63a..ce072ad68b4ec 100644
--- a/jstests/core/stage_debug/stages_collection_scan.js
+++ b/jstests/core/stage_debug/stages_collection_scan.js
@@ -8,7 +8,7 @@
// ]
// Test basic query stage collection scan functionality.
-t = db.stages_collection_scan;
+let t = db.stages_collection_scan;
t.drop();
var collname = "stages_collection_scan";
@@ -17,37 +17,29 @@ for (var i = 0; i < N; ++i) {
t.insert({foo: i});
}
-forward = {
- cscan: {args: {direction: 1}}
-};
-res = db.runCommand({stageDebug: {collection: collname, plan: forward}});
+let forward = {cscan: {args: {direction: 1}}};
+let res = db.runCommand({stageDebug: {collection: collname, plan: forward}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
assert.eq(res.results[0].foo, 0);
assert.eq(res.results[49].foo, 49);
// And, backwards.
-backward = {
- cscan: {args: {direction: -1}}
-};
+let backward = {cscan: {args: {direction: -1}}};
res = db.runCommand({stageDebug: {collection: collname, plan: backward}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
assert.eq(res.results[0].foo, 49);
assert.eq(res.results[49].foo, 0);
-forwardFiltered = {
- cscan: {args: {direction: 1}, filter: {foo: {$lt: 25}}}
-};
+let forwardFiltered = {cscan: {args: {direction: 1}, filter: {foo: {$lt: 25}}}};
res = db.runCommand({stageDebug: {collection: collname, plan: forwardFiltered}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 25);
assert.eq(res.results[0].foo, 0);
assert.eq(res.results[24].foo, 24);
-backwardFiltered = {
- cscan: {args: {direction: -1}, filter: {foo: {$lt: 25}}}
-};
+let backwardFiltered = {cscan: {args: {direction: -1}, filter: {foo: {$lt: 25}}}};
res = db.runCommand({stageDebug: {collection: collname, plan: backwardFiltered}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 25);
diff --git a/jstests/core/stage_debug/stages_fetch.js b/jstests/core/stage_debug/stages_fetch.js
index bdec393ce1c93..30a7ef61ac6fb 100644
--- a/jstests/core/stage_debug/stages_fetch.js
+++ b/jstests/core/stage_debug/stages_fetch.js
@@ -7,7 +7,7 @@
// ]
// Test basic fetch functionality.
-t = db.stages_fetch;
+let t = db.stages_fetch;
t.drop();
var collname = "stages_fetch";
@@ -20,7 +20,7 @@ t.createIndex({foo: 1});
// 20 <= foo <= 30
// bar == 25 (not covered, should error.)
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
keyPattern: {foo: 1},
@@ -33,11 +33,11 @@ ixscan1 = {
filter: {bar: 25}
}
};
-res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
+let res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 0);
// Now, add a fetch. We should be able to filter on the non-covered field since we fetched the obj.
-ixscan2 = {
+let ixscan2 = {
ixscan: {
args: {
keyPattern: {foo: 1},
@@ -49,9 +49,7 @@ ixscan2 = {
}
}
};
-fetch = {
- fetch: {args: {node: ixscan2}, filter: {bar: 25}}
-};
+let fetch = {fetch: {args: {node: ixscan2}, filter: {bar: 25}}};
res = db.runCommand({stageDebug: {collection: collname, plan: fetch}});
printjson(res);
assert.eq(res.ok, 1);
diff --git a/jstests/core/stage_debug/stages_ixscan.js b/jstests/core/stage_debug/stages_ixscan.js
index de345fabb02a8..eb4d3c641d1b6 100644
--- a/jstests/core/stage_debug/stages_ixscan.js
+++ b/jstests/core/stage_debug/stages_ixscan.js
@@ -7,7 +7,7 @@
// ]
// Test basic query stage index scan functionality.
-t = db.stages_ixscan;
+let t = db.stages_ixscan;
t.drop();
var collname = "stages_ixscan";
@@ -40,7 +40,7 @@ assert.commandFailed(db.runCommand({
}));
// foo <= 20
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
keyPattern: {foo: 1},
diff --git a/jstests/core/stage_debug/stages_limit_skip.js b/jstests/core/stage_debug/stages_limit_skip.js
index 623e403c50445..680ae9d682d30 100644
--- a/jstests/core/stage_debug/stages_limit_skip.js
+++ b/jstests/core/stage_debug/stages_limit_skip.js
@@ -7,7 +7,7 @@
// ]
// Test limit and skip
-t = db.stages_limit_skip;
+let t = db.stages_limit_skip;
t.drop();
var collname = "stages_limit_skip";
@@ -20,7 +20,7 @@ t.createIndex({foo: 1});
// foo <= 20, decreasing
// Limit of 5 results.
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
keyPattern: {foo: 1},
@@ -32,10 +32,8 @@ ixscan1 = {
}
}
};
-limit1 = {
- limit: {args: {node: ixscan1, num: 5}}
-};
-res = db.runCommand({stageDebug: {collection: collname, plan: limit1}});
+let limit1 = {limit: {args: {node: ixscan1, num: 5}}};
+let res = db.runCommand({stageDebug: {collection: collname, plan: limit1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 5);
assert.eq(res.results[0].foo, 20);
@@ -43,9 +41,7 @@ assert.eq(res.results[4].foo, 16);
// foo <= 20, decreasing
// Skip 5 results.
-skip1 = {
- skip: {args: {node: ixscan1, num: 5}}
-};
+let skip1 = {skip: {args: {node: ixscan1, num: 5}}};
res = db.runCommand({stageDebug: {collection: collname, plan: skip1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 16);
diff --git a/jstests/core/stage_debug/stages_mergesort.js b/jstests/core/stage_debug/stages_mergesort.js
index c202a3ca82729..b3203b98dacd0 100644
--- a/jstests/core/stage_debug/stages_mergesort.js
+++ b/jstests/core/stage_debug/stages_mergesort.js
@@ -7,7 +7,7 @@
// ]
// Test query stage merge sorting.
-t = db.stages_mergesort;
+let t = db.stages_mergesort;
t.drop();
var collname = "stages_mergesort";
@@ -22,7 +22,7 @@ t.createIndex({baz: 1, bar: 1});
// foo == 1
// We would (internally) use "": MinKey and "": MaxKey for the bar index bounds.
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
keyPattern: {foo: 1, bar: 1},
@@ -35,7 +35,7 @@ ixscan1 = {
}
};
// baz == 1
-ixscan2 = {
+let ixscan2 = {
ixscan: {
args: {
keyPattern: {baz: 1, bar: 1},
@@ -48,10 +48,8 @@ ixscan2 = {
}
};
-mergesort = {
- mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}}
-};
-res = db.runCommand({stageDebug: {plan: mergesort, collection: collname}});
+let mergesort = {mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}}};
+let res = db.runCommand({stageDebug: {plan: mergesort, collection: collname}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 2 * N);
assert.eq(res.results[0].bar, 0);
diff --git a/jstests/core/stage_debug/stages_or.js b/jstests/core/stage_debug/stages_or.js
index 6eb2e9812fb0b..3c674c5f0178d 100644
--- a/jstests/core/stage_debug/stages_or.js
+++ b/jstests/core/stage_debug/stages_or.js
@@ -7,7 +7,7 @@
// ]
// Test basic OR functionality
-t = db.stages_or;
+let t = db.stages_or;
t.drop();
var collname = "stages_or";
@@ -21,7 +21,7 @@ t.createIndex({bar: 1});
t.createIndex({baz: 1});
// baz >= 40
-ixscan1 = {
+let ixscan1 = {
ixscan: {
args: {
keyPattern: {baz: 1},
@@ -34,7 +34,7 @@ ixscan1 = {
}
};
// foo >= 40
-ixscan2 = {
+let ixscan2 = {
ixscan: {
args: {
keyPattern: {foo: 1},
@@ -48,17 +48,13 @@ ixscan2 = {
};
// OR of baz and foo. Baz == foo and we dedup.
-orix1ix2 = {
- or: {args: {nodes: [ixscan1, ixscan2], dedup: true}}
-};
-res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2}});
+let orix1ix2 = {or: {args: {nodes: [ixscan1, ixscan2], dedup: true}}};
+let res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 10);
// No deduping, 2x the results.
-orix1ix2nodd = {
- or: {args: {nodes: [ixscan1, ixscan2], dedup: false}}
-};
+let orix1ix2nodd = {or: {args: {nodes: [ixscan1, ixscan2], dedup: false}}};
res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2nodd}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 20);
diff --git a/jstests/core/stage_debug/stages_sort.js b/jstests/core/stage_debug/stages_sort.js
index 2fd682edc3c4b..a79b64bca0abb 100644
--- a/jstests/core/stage_debug/stages_sort.js
+++ b/jstests/core/stage_debug/stages_sort.js
@@ -3,7 +3,7 @@
// @tags: [not_allowed_with_security_token]
if (false) {
- t = db.stages_sort;
+ let t = db.stages_sort;
t.drop();
var N = 50;
@@ -14,7 +14,7 @@ if (false) {
t.createIndex({foo: 1});
// Foo <= 20, descending.
- ixscan1 = {
+ let ixscan1 = {
ixscan: {
args: {
name: "stages_sort",
@@ -29,8 +29,8 @@ if (false) {
};
// Sort with foo ascending.
- sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}};
- res = db.runCommand({stageDebug: sort1});
+ let sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}};
+ let res = db.runCommand({stageDebug: sort1});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 21);
assert.eq(res.results[0].foo, 0);
diff --git a/jstests/core/string_with_nul_bytes.js b/jstests/core/string_with_nul_bytes.js
index e72cc0b6dc1e5..718a13646663d 100644
--- a/jstests/core/string_with_nul_bytes.js
+++ b/jstests/core/string_with_nul_bytes.js
@@ -1,9 +1,9 @@
// SERVER-6649 - issues round-tripping strings with embedded NUL bytes
-t = db.string_with_nul_bytes.js;
+let t = db.string_with_nul_bytes.js;
t.drop();
-string = "string with a NUL (\0) byte";
+let string = "string with a NUL (\0) byte";
t.insert({str: string});
assert.eq(t.findOne().str, string);
assert.eq(t.findOne().str.length, string.length); // just to be sure
diff --git a/jstests/core/sub1.js b/jstests/core/sub1.js
index d42677f32662a..f38c5ccc4a01e 100644
--- a/jstests/core/sub1.js
+++ b/jstests/core/sub1.js
@@ -1,16 +1,13 @@
// sub1.js
-t = db.sub1;
+let t = db.sub1;
t.drop();
-x = {
- a: 1,
- b: {c: {d: 2}}
-};
+let x = {a: 1, b: {c: {d: 2}}};
t.save(x);
-y = t.findOne();
+let y = t.findOne();
assert.eq(1, y.a);
assert.eq(2, y.b.c.d);
diff --git a/jstests/core/testminmax.js b/jstests/core/testminmax.js
index 3ebf0a13f3a36..712b00f5151c8 100644
--- a/jstests/core/testminmax.js
+++ b/jstests/core/testminmax.js
@@ -1,6 +1,6 @@
// @tags: [requires_fastcount]
-t = db.minmaxtest;
+let t = db.minmaxtest;
t.drop();
t.insert({
"_id": "IBM.N|00001264779918428889",
diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js
index df4fced499ab1..446502e876370 100644
--- a/jstests/core/text_covered_matching.js
+++ b/jstests/core/text_covered_matching.js
@@ -12,10 +12,8 @@
// assumes_read_concern_local,
// ]
-load("jstests/libs/analyze_plan.js");
+import {getPlanStages, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
-(function() {
-"use strict";
const coll = db.text_covered_matching;
coll.drop();
@@ -216,4 +214,3 @@ assert.gt(explainResult.executionStats.totalDocsExamined,
assert.eq(explainResult.executionStats.nReturned,
1,
"Unexpected number of results returned: " + tojson(explainResult));
-})();
diff --git a/jstests/core/timeseries/bucket_span_and_rounding_seconds.js b/jstests/core/timeseries/bucket_span_and_rounding_seconds.js
index 2b339b810c758..3a827938f4eb4 100644
--- a/jstests/core/timeseries/bucket_span_and_rounding_seconds.js
+++ b/jstests/core/timeseries/bucket_span_and_rounding_seconds.js
@@ -10,15 +10,12 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- return;
+ quit();
}
const testDB = db.getSiblingDB(jsTestName());
@@ -209,5 +206,4 @@ const verifyCreateCommandFails = function(secondsOptions = {}, errorCode) {
verifyCreateCommandFails(
{bucketRoundingSeconds: bucketingValueMax + 1, bucketMaxSpanSeconds: bucketingValueMax + 1},
idlInvalidValueError);
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/timeseries/bucket_unpacking_with_limit.js b/jstests/core/timeseries/bucket_unpacking_with_limit.js
new file mode 100644
index 0000000000000..de6649168b240
--- /dev/null
+++ b/jstests/core/timeseries/bucket_unpacking_with_limit.js
@@ -0,0 +1,160 @@
+/**
+ * Tests that the bucket unpacking with limit rewrite is performed and pushes the limit before
+ * unpacking all buckets, while ensuring that no incorrect results are produced.
+ *
+ * @tags: [
+ * # This test depends on certain writes ending up in the same bucket. Stepdowns may result in
+ * # writes splitting between two primaries, and thus different buckets.
+ * does_not_support_stepdowns,
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * # Explain of a resolved view must be executed by mongos.
+ * directly_against_shardsvrs_incompatible,
+ *  # Wrapping aggregations in facets complicates extracting the pipeline under test.
+ * do_not_wrap_aggregations_in_facets,
+ * # Refusing to run a test that issues an aggregation command with explain because it may
+ * # return incomplete results if interrupted by a stepdown.
+ * does_not_support_stepdowns,
+ * requires_fcv_71
+ * ]
+ */
+
+load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation
+
+const collName = "timeseries_bucket_unpacking_with_limit";
+const coll = db[collName];
+const metaCollName = "timeseries_bucket_unpacking_with_limit_meta";
+const metaColl = db[metaCollName];
+
+// Helper function to set up collections.
+const setupColl = (coll, collName, usesMeta) => {
+ coll.drop();
+
+    // If usesMeta is true, we want the collection to have a meta field.
+ if (usesMeta) {
+ assert.commandWorked(
+ db.createCollection(collName, {timeseries: {timeField: "t", metaField: "m"}}));
+ } else {
+ assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "t"}}));
+ }
+ const bucketsColl = db.getCollection('system.buckets.' + collName);
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ let docs = [];
+    // If usesMeta is true, we push 10 documents, each with a different meta field value. This
+    // tests the case where documents come from multiple different buckets. If usesMeta is false,
+    // we generate 20 documents that all go into the same bucket.
+ for (let i = 0; i < 10; ++i) {
+ if (usesMeta) {
+ docs.push({m: {"sensorId": i, "type": "temperature"}, t: new Date(i), _id: i});
+ } else {
+ docs.push({t: new Date(i), _id: i});
+ docs.push({t: new Date(i * 10), _id: i * 10});
+ }
+ }
+ assert.commandWorked(coll.insert(docs));
+ return docs;
+};
+
+// Helper function to check the PlanStage.
+const assertPlanStagesInPipeline =
+ ({pipeline, expectedStages, expectedResults = [], onlyMeta = false}) => {
+        // If onlyMeta is set to true, only include the collection that has a meta field, so that
+        // the sort can be done on the meta field.
+ var colls = onlyMeta ? [metaColl] : [coll, metaColl];
+ for (const c of colls) {
+ const aggRes = c.explain().aggregate(pipeline);
+ const planStage =
+ getExplainedPipelineFromAggregation(db, c, pipeline, {inhibitOptimization: false});
+            // We check the stage at index i in the plan against the i'th entry in expectedStages.
+            // The rewrite should turn the [{$_unpack}, {$limit: x}] pipeline into
+            // [{$limit: x}, {$_unpack}, {$limit: x}].
+ assert(expectedStages.length == planStage.length);
+ for (var i = 0; i < expectedStages.length; i++) {
+ assert(planStage[i].hasOwnProperty(expectedStages[i]), tojson(aggRes));
+ }
+
+ if (expectedResults.length != 0) {
+ const result = c.aggregate(pipeline).toArray();
+ assert(expectedResults.length == result.length);
+ for (var i = 0; i < expectedResults.length; i++) {
+ assert.docEq(result[i], expectedResults[i], tojson(result));
+ }
+ }
+ }
+ };
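+
+// Illustrative only: for a pipeline such as [{$limit: 2}], the optimized plan is expected to look
+// roughly like [{$limit: 2}, {$_internalUnpackBucket: ...}, {$limit: 2}]. The leading $limit lives
+// inside the PlanStage, so it is not visible in the explained pipelines checked below.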
+
+// Helper function to test correctness.
+const testLimitCorrectness = (size) => {
+ for (const c of [coll, metaColl]) {
+ const res = c.aggregate([{$limit: size}]).toArray();
+ const allElements = c.find().toArray();
+ // Checks that the result length is correct, and that each element is unique
+ assert.eq(res.length, Math.min(size, allElements.length), tojson(res));
+ assert.eq(res.length, new Set(res).size, tojson(res));
+ // checks that each element in the result is actually from the collection
+ for (var i = 0; i < res.length; i++) {
+ assert.contains(res[i], allElements, tojson(res));
+ }
+ }
+};
+
+setupColl(coll, collName, false);
+const metaDocs = setupColl(metaColl, metaCollName, true);
+
+// Simple limit test. Because the pushed-down limit now lives in the PlanStage,
+// getExplainedPipelineFromAggregation does not display it, so we don't see the first limit / sort
+// stage here. The presence of the pushed-down limit is tested in unit tests.
+assertPlanStagesInPipeline(
+ {pipeline: [{$limit: 2}], expectedStages: ["$_internalUnpackBucket", "$limit"]});
+// Test that when two limits are present, they are coalesced into a single limit that takes the
+// smaller (tighter) value.
+assertPlanStagesInPipeline(
+ {pipeline: [{$limit: 2}, {$limit: 10}], expectedStages: ["$_internalUnpackBucket", "$limit"]});
+// Add another stage after $limit to make sure it is also executed.
+assertPlanStagesInPipeline({
+ pipeline: [{$limit: 2}, {$match: {"temp": 11}}],
+ expectedStages: ["$_internalUnpackBucket", "$limit", "$match"]
+});
+
+// Correctness test
+testLimitCorrectness(2);
+testLimitCorrectness(10);
+testLimitCorrectness(20);
+
+// Test that sort absorbs the limits following it.
+assertPlanStagesInPipeline({
+ pipeline: [{$sort: {'m.sensorId': 1}}, {$limit: 2}],
+ expectedStages: ["$_internalUnpackBucket", "$limit"],
+ expectedResults: [metaDocs[0], metaDocs[1]],
+ onlyMeta: true
+});
+assertPlanStagesInPipeline({
+ pipeline: [{$sort: {"m.sensorId": -1}}, {$limit: 10}, {$limit: 2}],
+ expectedStages: ["$_internalUnpackBucket", "$limit"],
+ expectedResults: [metaDocs[9], metaDocs[8]],
+ onlyMeta: true
+});
+assertPlanStagesInPipeline({
+ pipeline: [{$sort: {"m.sensorId": 1}}, {$limit: 10}, {$limit: 50}],
+ expectedStages: ["$_internalUnpackBucket", "$limit"],
+ expectedResults: [
+ metaDocs[0],
+ metaDocs[1],
+ metaDocs[2],
+ metaDocs[3],
+ metaDocs[4],
+ metaDocs[5],
+ metaDocs[6],
+ metaDocs[7],
+ metaDocs[8],
+ metaDocs[9]
+ ],
+ onlyMeta: true
+});
+// Test a limit that comes before the sort.
+assertPlanStagesInPipeline({
+ pipeline: [{$limit: 2}, {$sort: {"m.sensorId": 1}}],
+ expectedStages: ["$_internalUnpackBucket", "$limit", "$sort"],
+ onlyMeta: true
+});
diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort.js b/jstests/core/timeseries/bucket_unpacking_with_sort.js
index 13248964ec830..1e7cdbc5194b1 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort.js
@@ -14,18 +14,10 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation.
-load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const collName = "bucket_unpacking_with_sort";
const coll = db[collName];
@@ -665,4 +657,3 @@ runDoesntRewriteTest({t: 1},
runDoesntRewriteTest({t: 1}, null, {m: 1, t: 1}, csStringColl, [{$match: {m: 'a'}}]);
runDoesntRewriteTest({t: 1}, null, {m: 1, t: 1}, ciStringColl, [{$match: {m: 'a'}}]);
}
-})();
diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
index ad4f6cf868986..ae3b0dc262b34 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
@@ -15,17 +15,8 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation.
-load("jstests/core/timeseries/libs/timeseries.js");
-load('jstests/libs/analyze_plan.js');
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const timeFieldName = "t";
@@ -262,4 +253,3 @@ function runTest(ascending) {
runTest(false); // descending
runTest(true); // ascending
-})();
diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js b/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js
index cca7187ff61d9..05dc167c7882d 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js
@@ -22,18 +22,13 @@
* tenant_migration_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/profiler.js"); // For getLatestProfileEntry.
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/libs/analyze_plan.js"); // For planHasStage.
-load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest.
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+load("jstests/libs/profiler.js"); // For getLatestProfileEntry.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+import {
+ getAggPlanStages,
+ getAggPlanStage,
+ getPlanCacheKeyFromExplain
+} from "jstests/libs/analyze_plan.js";
const fields = ["a", "b", "i"];
@@ -146,4 +141,3 @@ for (const sortDirection of [-1, 1]) {
testBoundedSorterPlanCache(sortDirection, indexDirection);
}
}
-})();
diff --git a/jstests/core/timeseries/clustered_index_crud.js b/jstests/core/timeseries/clustered_index_crud.js
index ae5a3c528611d..be5eb35e0e0e8 100644
--- a/jstests/core/timeseries/clustered_index_crud.js
+++ b/jstests/core/timeseries/clustered_index_crud.js
@@ -6,11 +6,6 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const collName = 'system.buckets.clustered_index_crud';
const coll = db[collName];
coll.drop();
@@ -47,5 +42,4 @@ assert.eq(1, coll.find({_id: null}).itcount());
assert.commandWorked(coll.insert({_id: 'x'.repeat(100), a: 10}));
assert.commandWorked(coll.createIndex({a: 1}));
-assert.commandWorked(coll.dropIndex({a: 1}));
-})();
+assert.commandWorked(coll.dropIndex({a: 1}));
\ No newline at end of file
diff --git a/jstests/core/timeseries/libs/geo.js b/jstests/core/timeseries/libs/geo.js
index 3c3e5b6db919e..90d59f8aa5096 100644
--- a/jstests/core/timeseries/libs/geo.js
+++ b/jstests/core/timeseries/libs/geo.js
@@ -22,4 +22,4 @@ function randomLongLat() {
const lat = latRadians * 180 / Math.PI;
return [long, lat];
}
-}
\ No newline at end of file
+}
diff --git a/jstests/core/timeseries/libs/timeseries.js b/jstests/core/timeseries/libs/timeseries.js
index ed82d6cbda667..03b3640249a7a 100644
--- a/jstests/core/timeseries/libs/timeseries.js
+++ b/jstests/core/timeseries/libs/timeseries.js
@@ -2,10 +2,10 @@
// The test runs commands that are not allowed with security token: movechunk, split.
// @tags: [not_allowed_with_security_token]
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/aggregation/extras/utils.js");
-var TimeseriesTest = class {
+export var TimeseriesTest = class {
static getBucketMaxSpanSecondsFromGranularity(granularity) {
switch (granularity) {
case 'seconds':
@@ -40,27 +40,14 @@ var TimeseriesTest = class {
return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesScalabilityImprovements");
}
- /**
- * Returns whether sharded time-series updates and deletes are supported.
- * TODO SERVER-69320 remove this helper.
- */
- static shardedTimeseriesUpdatesAndDeletesEnabled(conn) {
- return FeatureFlagUtil.isPresentAndEnabled(conn, "ShardedTimeSeriesUpdateDelete");
- }
-
- // TODO SERVER-69320 remove this helper.
- static shardedtimeseriesCollectionsEnabled(conn) {
- return FeatureFlagUtil.isPresentAndEnabled(conn, "ShardedTimeSeries");
- }
-
// TODO SERVER-65082 remove this helper.
static timeseriesMetricIndexesEnabled(conn) {
return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesMetricIndexes");
}
- // TODO SERVER-69324 remove this helper.
- static bucketUnpackWithSortEnabled(conn) {
- return FeatureFlagUtil.isPresentAndEnabled(conn, "BucketUnpackWithSort");
+ // TODO SERVER-68058 remove this helper.
+ static arbitraryUpdatesEnabled(conn) {
+ return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesUpdatesSupport");
}
/**
diff --git a/jstests/core/timeseries/libs/timeseries_agg_helpers.js b/jstests/core/timeseries/libs/timeseries_agg_helpers.js
index a2a2f74393fbf..f3d549bc22a81 100644
--- a/jstests/core/timeseries/libs/timeseries_agg_helpers.js
+++ b/jstests/core/timeseries/libs/timeseries_agg_helpers.js
@@ -1,9 +1,9 @@
-load("jstests/core/timeseries/libs/timeseries.js");
-
/**
* Helper class for aggregate tests with time-series collection.
*/
-var TimeseriesAggTests = class {
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+
+export var TimeseriesAggTests = class {
/**
* Gets a test db object based on the test suite name.
*/
@@ -20,14 +20,15 @@ var TimeseriesAggTests = class {
* @returns An array of a time-series collection and a non time-series collection,
* respectively in this order.
*/
- static prepareInputCollections(numHosts, numIterations, includeIdleMeasurements = true) {
+ static prepareInputCollections(numHosts,
+ numIterations,
+ includeIdleMeasurements = true,
+ testDB = TimeseriesAggTests.getTestDb()) {
const timeseriesCollOption = {timeseries: {timeField: "time", metaField: "tags"}};
Random.setRandomSeed();
const hosts = TimeseriesTest.generateHosts(numHosts);
- const testDB = TimeseriesAggTests.getTestDb();
-
// Creates a time-series input collection.
const inColl = testDB.getCollection("in");
inColl.drop();
@@ -37,7 +38,7 @@ var TimeseriesAggTests = class {
const observerInColl = testDB.getCollection("observer_in");
observerInColl.drop();
assert.commandWorked(testDB.createCollection(observerInColl.getName()));
- const currTime = new Date();
+ let currTime = new Date();
// Inserts exactly the same random measurement to both inColl and observerInColl.
for (let i = 0; i < numIterations; i++) {
@@ -45,7 +46,7 @@ var TimeseriesAggTests = class {
const userUsage = TimeseriesTest.getRandomUsage();
let newMeasurement = {
tags: host.tags,
- time: new Date(currTime + i),
+ time: new Date(currTime++),
usage_guest: TimeseriesTest.getRandomUsage(),
usage_guest_nice: TimeseriesTest.getRandomUsage(),
usage_idle: TimeseriesTest.getRandomUsage(),
@@ -63,7 +64,7 @@ var TimeseriesAggTests = class {
if (includeIdleMeasurements && (i % 2)) {
let idleMeasurement = {
tags: host.tags,
- time: new Date(currTime + i),
+ time: new Date(currTime++),
idle_user: 100 - userUsage
};
assert.commandWorked(inColl.insert(idleMeasurement));
@@ -78,11 +79,11 @@ var TimeseriesAggTests = class {
/**
* Gets an output collection object with the name 'outCollname'.
*/
- static getOutputCollection(outCollName) {
- const testDB = TimeseriesAggTests.getTestDb();
-
+ static getOutputCollection(outCollName, shouldDrop, testDB = TimeseriesAggTests.getTestDb()) {
let outColl = testDB.getCollection(outCollName);
- outColl.drop();
+ if (shouldDrop) {
+ outColl.drop();
+ }
return outColl;
}
@@ -96,21 +97,35 @@ var TimeseriesAggTests = class {
* Executes 'prepareAction' before executing 'pipeline'. 'prepareAction' takes a collection
* parameter and returns nothing.
*
+ * If 'shouldDrop' is set to false, the output collection will not be dropped before executing
+ * 'pipeline'.
+ *
+ * If 'testDB' is set, that database will be used in the aggregation pipeline.
+ *
* Returns sorted data by "time" field. The sorted result data will help simplify comparison
* logic.
*/
- static getOutputAggregateResults(inColl, pipeline, prepareAction = null) {
+ static getOutputAggregateResults(inColl,
+ pipeline,
+ prepareAction = null,
+ shouldDrop = true,
+ testDB = TimeseriesAggTests.getTestDb()) {
// Figures out the output collection name from the last pipeline stage.
var outCollName = "out";
if (pipeline[pipeline.length - 1]["$out"] != undefined) {
- // If the last stage is "$out", gets the output collection name from it.
- outCollName = pipeline[pipeline.length - 1]["$out"];
+            // If the last stage is "$out", get the output collection name from the string or
+            // object argument.
+ if (typeof pipeline[pipeline.length - 1]["$out"] == 'string') {
+ outCollName = pipeline[pipeline.length - 1]["$out"];
+ } else {
+ outCollName = pipeline[pipeline.length - 1]["$out"]["coll"];
+ }
} else if (pipeline[pipeline.length - 1]["$merge"] != undefined) {
// If the last stage is "$merge", gets the output collection name from it.
outCollName = pipeline[pipeline.length - 1]["$merge"].into;
}
- let outColl = TimeseriesAggTests.getOutputCollection(outCollName);
+ let outColl = TimeseriesAggTests.getOutputCollection(outCollName, shouldDrop, testDB);
if (prepareAction != null) {
prepareAction(outColl);
}
@@ -122,4 +137,22 @@ var TimeseriesAggTests = class {
.sort({"time": 1})
.toArray();
}
+
+ static verifyResults(actualResults, expectedResults) {
+        // Verifies that the number of measurements is the same as expected.
+ assert.eq(actualResults.length, expectedResults.length, actualResults);
+
+        // Verifies that every measurement is the same as expected.
+ for (var i = 0; i < expectedResults.length; ++i) {
+ assert.eq(actualResults[i], expectedResults[i], actualResults);
+ }
+ }
+
+ static generateOutPipeline(collName, dbName, options, aggStage = null) {
+ let outStage = {$out: {db: dbName, coll: collName, timeseries: options}};
+ if (aggStage) {
+ return [aggStage, outStage];
+ }
+ return [outStage];
+ }
};
diff --git a/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js b/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js
index 200b257084ca0..af28500c152a3 100644
--- a/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js
+++ b/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js
@@ -3,19 +3,19 @@
*/
load("jstests/aggregation/extras/utils.js");
-load("jstests/core/timeseries/libs/timeseries_agg_helpers.js");
-load("jstests/libs/analyze_plan.js");
+import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js";
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
// These are functions instead of const variables to avoid tripping up the parallel jstests.
-function getEquivalentStrings() {
+export function getEquivalentStrings() {
return ['a', 'A', 'b', 'B'];
}
-function getEquivalentNumbers() {
+export function getEquivalentNumbers() {
return [7, NumberInt(7), NumberLong(7), NumberDecimal(7)];
}
-function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expectStage}) {
+export function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expectStage}) {
     // Verify lastpoint optimization.
const explain = tsColl.explain().aggregate(pipeline);
expectStage({explain, precedingFilter});
@@ -26,7 +26,7 @@ function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expec
assertArrayEq({actual, expected});
}
-function createBoringCollections(includeIdleMeasurements = false) {
+export function createBoringCollections(includeIdleMeasurements = false) {
// Prepare collections. Note: we usually test without idle measurements (all meta subfields are
// non-null). If we allow the insertion of idle measurements, we will obtain multiple lastpoints
// per bucket, and may have different results on the observer and timeseries collections.
@@ -37,7 +37,7 @@ function createBoringCollections(includeIdleMeasurements = false) {
}
// Generate interesting values.
-function generateInterestingValues() {
+export function generateInterestingValues() {
const epoch = ISODate('1970-01-01');
// Pick values with interesting equality behavior.
@@ -94,7 +94,7 @@ function generateInterestingValues() {
return docs;
}
-function getMapInterestingValuesToEquivalentsStage() {
+export function getMapInterestingValuesToEquivalentsStage() {
const firstElemInId = {$arrayElemAt: ["$_id", 0]};
const isIdArray = {$isArray: "$_id"};
const equivalentStrings = getEquivalentStrings();
@@ -135,7 +135,7 @@ function getMapInterestingValuesToEquivalentsStage() {
};
}
-function createInterestingCollections() {
+export function createInterestingCollections() {
const testDB = TimeseriesAggTests.getTestDb();
const collation = {locale: 'en_US', strength: 2};
@@ -164,7 +164,7 @@ function createInterestingCollections() {
return [tsColl, observerColl];
}
-function expectDistinctScan({explain}) {
+export function expectDistinctScan({explain}) {
// The query can utilize DISTINCT_SCAN.
assert.neq(getAggPlanStage(explain, "DISTINCT_SCAN"), null, explain);
@@ -172,7 +172,7 @@ function expectDistinctScan({explain}) {
assert.eq(getAggPlanStage(explain, "SORT"), null, explain);
}
-function expectCollScan({explain, precedingFilter, noSortInCursor}) {
+export function expectCollScan({explain, precedingFilter, noSortInCursor}) {
if (noSortInCursor) {
// We need a separate sort stage.
assert.eq(getAggPlanStage(explain, "SORT"), null, explain);
@@ -189,7 +189,7 @@ function expectCollScan({explain, precedingFilter, noSortInCursor}) {
}
}
-function expectIxscan({explain, noSortInCursor}) {
+export function expectIxscan({explain, noSortInCursor}) {
if (noSortInCursor) {
// We can rely on the index without a cursor $sort.
assert.eq(getAggPlanStage(explain, "SORT"), null, explain);
@@ -209,7 +209,7 @@ function expectIxscan({explain, noSortInCursor}) {
3. Lastpoint queries on indexes with ascending time and $last/$bottom and an additional
secondary index so that we can use the DISTINCT_SCAN optimization.
*/
-function testAllTimeMetaDirections(tsColl, observerColl, getTestCases) {
+export function testAllTimeMetaDirections(tsColl, observerColl, getTestCases) {
const testDB = TimeseriesAggTests.getTestDb();
const testCases = [
{time: -1, useBucketsIndex: false},
diff --git a/jstests/core/timeseries/libs/timeseries_writes_util.js b/jstests/core/timeseries/libs/timeseries_writes_util.js
new file mode 100644
index 0000000000000..4b6c345372dd4
--- /dev/null
+++ b/jstests/core/timeseries/libs/timeseries_writes_util.js
@@ -0,0 +1,1032 @@
+/**
+ * Helpers for testing timeseries arbitrary writes.
+ */
+
+import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js";
+
+export const timeFieldName = "time";
+export const metaFieldName = "tag";
+export const sysCollNamePrefix = "system.buckets.";
+
+export const closedBucketFilter = {
+ "control.closed": {$not: {$eq: true}}
+};
+
+// The split point is between the 'A' and 'B' meta values, i.e. at _id: 4. Documents with _id in
+// [1, 3] go to the primary shard and those in [4, 7] go to the other shard.
+export const splitMetaPointBetweenTwoShards = {
+ meta: "B"
+};
+
+// This split point corresponds to the same boundary as 'splitMetaPointBetweenTwoShards'.
+export const splitTimePointBetweenTwoShards = {
+ [`control.min.${timeFieldName}`]: ISODate("2003-06-30")
+};
+
+export function generateTimeValue(index) {
+ return ISODate(`${2000 + index}-01-01`);
+}
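+
+// For example (illustrative), generateTimeValue(1) yields ISODate("2001-01-01"), so the sample
+// documents below get strictly increasing, year-apart timestamps keyed by their _id.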
+
+// Defines sample data set for testing.
+export const doc1_a_nofields = {
+ _id: 1,
+ [timeFieldName]: generateTimeValue(1),
+ [metaFieldName]: "A",
+};
+
+export const doc2_a_f101 = {
+ _id: 2,
+ [timeFieldName]: generateTimeValue(2),
+ [metaFieldName]: "A",
+ f: 101
+};
+
+export const doc3_a_f102 = {
+ _id: 3,
+ [timeFieldName]: generateTimeValue(3),
+ [metaFieldName]: "A",
+ f: 102
+};
+
+export const doc4_b_f103 = {
+ _id: 4,
+ [timeFieldName]: generateTimeValue(4),
+ [metaFieldName]: "B",
+ f: 103
+};
+
+export const doc5_b_f104 = {
+ _id: 5,
+ [timeFieldName]: generateTimeValue(5),
+ [metaFieldName]: "B",
+ f: 104
+};
+
+export const doc6_c_f105 = {
+ _id: 6,
+ [timeFieldName]: generateTimeValue(6),
+ [metaFieldName]: "C",
+ f: 105
+};
+
+export const doc7_c_f106 = {
+ _id: 7,
+ [timeFieldName]: generateTimeValue(7),
+ [metaFieldName]: "C",
+ f: 106,
+};
+
+export let testDB = null;
+export let st = null;
+export let primaryShard = null;
+export let otherShard = null;
+export let mongos0DB = null;
+export let mongos1DB = null;
+
+/**
+ * Composes and returns a bucket-level filter for timeseries arbitrary writes.
+ *
+ * The bucket-level filter is composed of the closed bucket filter and the given filter(s) which
+ * are ANDed together. The closed bucket filter is always the first element of the AND array.
+ * Zero or more filters can be passed in as arguments.
+ */
+export function makeBucketFilter(...args) {
+ if (!args.length) {
+ return closedBucketFilter;
+ }
+
+ return {$and: [closedBucketFilter].concat(Array.from(args))};
+}
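+
+// A minimal usage sketch (illustrative only, not exercised by the tests):
+//
+//     makeBucketFilter({meta: "A"})
+//     // => {$and: [{"control.closed": {$not: {$eq: true}}}, {meta: "A"}]}
+//
+//     makeBucketFilter()
+//     // => {"control.closed": {$not: {$eq: true}}}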
+
+export function getTestDB() {
+ if (!testDB) {
+ testDB = db.getSiblingDB(jsTestName());
+ assert.commandWorked(testDB.dropDatabase());
+ }
+ return testDB;
+}
+
+export function prepareCollection({dbToUse, collName, initialDocList}) {
+ if (!dbToUse) {
+ dbToUse = getTestDB();
+ }
+ const coll = dbToUse.getCollection(collName);
+ coll.drop();
+ assert.commandWorked(dbToUse.createCollection(
+ coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+ assert.commandWorked(coll.insert(initialDocList));
+
+ return coll;
+}
+
+export function prepareShardedCollection(
+ {dbToUse, collName, initialDocList, includeMeta = true, shardKey, splitPoint}) {
+ if (!dbToUse) {
+ assert.neq(
+ null, testDB, "testDB must be initialized before calling prepareShardedCollection");
+ dbToUse = testDB;
+ }
+
+ const coll = dbToUse.getCollection(collName);
+ const sysCollName = sysCollNamePrefix + coll.getName();
+ coll.drop();
+
+ const tsOptions = includeMeta
+ ? {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}
+ : {timeseries: {timeField: timeFieldName}};
+ assert.commandWorked(dbToUse.createCollection(coll.getName(), tsOptions));
+ assert.commandWorked(coll.insert(initialDocList));
+
+ if (!shardKey) {
+ shardKey = includeMeta ? {[metaFieldName]: 1} : {[timeFieldName]: 1};
+ }
+ assert.commandWorked(coll.createIndex(shardKey));
+ assert.commandWorked(
+ dbToUse.adminCommand({shardCollection: coll.getFullName(), key: shardKey}));
+
+ if (!splitPoint) {
+ splitPoint = includeMeta ? splitMetaPointBetweenTwoShards : splitTimePointBetweenTwoShards;
+ }
+ // [MinKey, splitPoint) and [splitPoint, MaxKey) are the two chunks after the split.
+ assert.commandWorked(
+ dbToUse.adminCommand({split: dbToUse[sysCollName].getFullName(), middle: splitPoint}));
+
+ assert.commandWorked(dbToUse.adminCommand({
+ moveChunk: dbToUse[sysCollName].getFullName(),
+ find: splitPoint,
+ to: otherShard.shardName,
+ _waitForDelete: true
+ }));
+
+ return coll;
+}
+
+export function makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation) {
+ let findAndModifyCmd = {findAndModify: coll.getName(), query: filter, remove: true};
+ if (fields) {
+ findAndModifyCmd["fields"] = fields;
+ }
+ if (sort) {
+ findAndModifyCmd["sort"] = sort;
+ }
+ if (collation) {
+ findAndModifyCmd["collation"] = collation;
+ }
+
+ return findAndModifyCmd;
+}
+
+export function makeFindOneAndUpdateCommand(
+ coll, filter, update, returnNew, upsert, fields, sort, collation) {
+ assert(filter !== undefined && update !== undefined);
+ let findAndModifyCmd = {findAndModify: coll.getName(), query: filter, update: update};
+ if (returnNew !== undefined) {
+ findAndModifyCmd["new"] = returnNew;
+ }
+ if (upsert !== undefined) {
+ findAndModifyCmd["upsert"] = upsert;
+ }
+ if (fields !== undefined) {
+ findAndModifyCmd["fields"] = fields;
+ }
+ if (sort !== undefined) {
+ findAndModifyCmd["sort"] = sort;
+ }
+ if (collation !== undefined) {
+ findAndModifyCmd["collation"] = collation;
+ }
+
+ return findAndModifyCmd;
+}
+
+/**
+ * Returns the name of the caller of the function that called this function using the stack trace.
+ *
+ * This is useful for generating unique collection names. If the returned function name is not
+ * unique and the caller needs a unique collection name, the caller can append a unique suffix.
+ */
+export function getCallerName(callDepth = 2) {
+ return `${new Error().stack.split('\n')[callDepth].split('@')[0]}`;
+}
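+
+// Illustrative sketch (hypothetical caller names): with the default callDepth of 2, a test body
+// function testFoo() that calls testDeleteOne(), which in turn calls getCallerName(), gets back
+// "testFoo", so the helpers below can name their scratch collections after the invoking test.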
+
+export function verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted) {
+ let resultDocs = coll.find().toArray();
+ assert.eq(resultDocs.length, initialDocList.length - nDeleted, tojson(resultDocs));
+
+ // Validate the collection's exact contents if we were given the expected results. We may skip
+ // this step in some cases, if the delete doesn't pinpoint a specific document.
+ if (expectedResultDocs) {
+ assert.eq(expectedResultDocs.length, resultDocs.length, tojson(resultDocs));
+ assert.sameMembers(expectedResultDocs, resultDocs, tojson(resultDocs));
+ }
+}
+
+export function verifyExplain({
+ explain,
+ rootStageName,
+ opType,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nReturned,
+ nMatched,
+ nModified,
+ nUpserted,
+}) {
+ jsTestLog(`Explain: ${tojson(explain)}`);
+ assert(opType === "updateOne" || opType === "deleteOne" || opType === "updateMany" ||
+ opType === "deleteMany");
+
+ if (!rootStageName) {
+ rootStageName = "TS_MODIFY";
+ }
+ assert("PROJECTION_DEFAULT" === rootStageName || "TS_MODIFY" === rootStageName,
+ "Only PROJECTION_DEFAULT or TS_MODIFY is allowed");
+
+ let foundStage = getPlanStage(explain.queryPlanner.winningPlan, rootStageName);
+ assert.neq(null,
+ foundStage,
+ `The root ${rootStageName} stage not found in the plan: ${tojson(explain)}`);
+ if (rootStageName === "PROJECTION_DEFAULT") {
+ assert.eq("TS_MODIFY",
+ foundStage.inputStage.stage,
+ `TS_MODIFY is not a child of ${rootStageName} in the plan: ${tojson(explain)}`);
+ foundStage = foundStage.inputStage;
+ }
+
+ assert.eq(opType, foundStage.opType, `TS_MODIFY opType is wrong: ${tojson(foundStage)}`);
+ assert.eq(bucketFilter,
+ foundStage.bucketFilter,
+ `TS_MODIFY bucketFilter is wrong: ${tojson(foundStage)}`);
+ assert.eq(residualFilter,
+ foundStage.residualFilter,
+ `TS_MODIFY residualFilter is wrong: ${tojson(foundStage)}`);
+
+ const execStages = getExecutionStages(explain);
+ assert.eq(rootStageName, execStages[0].stage, `The root stage is wrong: ${tojson(execStages)}`);
+ let tsModifyStage = execStages[0];
+ if (tsModifyStage.stage === "PROJECTION_DEFAULT") {
+ tsModifyStage = tsModifyStage.inputStage;
+ }
+ assert.eq(
+ "TS_MODIFY", tsModifyStage.stage, `Can't find TS_MODIFY stage: ${tojson(execStages)}`);
+
+ if (nBucketsUnpacked !== undefined) {
+ assert.eq(nBucketsUnpacked,
+ tsModifyStage.nBucketsUnpacked,
+ `Got wrong nBucketsUnpacked ${tojson(tsModifyStage)}`);
+ }
+ if (nReturned !== undefined) {
+ assert.eq(
+ nReturned, tsModifyStage.nReturned, `Got wrong nReturned ${tojson(tsModifyStage)}`);
+ }
+ if (nMatched !== undefined) {
+ assert.eq(nMatched,
+ tsModifyStage.nMeasurementsMatched,
+ `Got wrong nMeasurementsMatched ${tojson(tsModifyStage)}`);
+ }
+ if (nModified !== undefined) {
+ if (opType.startsWith("update")) {
+ assert.eq(nModified,
+ tsModifyStage.nMeasurementsUpdated,
+                      `Got wrong nMeasurementsUpdated ${tojson(tsModifyStage)}`);
+ } else {
+ assert.eq(nModified,
+ tsModifyStage.nMeasurementsDeleted,
+                      `Got wrong nMeasurementsDeleted ${tojson(tsModifyStage)}`);
+ }
+ }
+ if (nUpserted !== undefined) {
+ assert.eq(nUpserted,
+ tsModifyStage.nMeasurementsUpserted,
+ `Got wrong nMeasurementsUpserted ${tojson(tsModifyStage)}`);
+ }
+}
+
+/**
+ * Verifies that a deleteOne returns the expected set of documents.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - filter: The filter for the deleteOne command.
+ * - expectedResultDocs: The expected documents in the collection after the delete.
+ * - nDeleted: The expected number of documents deleted.
+ */
+export function testDeleteOne({initialDocList, filter, expectedResultDocs, nDeleted}) {
+ const callerName = getCallerName();
+ jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`);
+
+ const coll = prepareCollection({collName: callerName, initialDocList: initialDocList});
+
+ const res = assert.commandWorked(coll.deleteOne(filter));
+ assert.eq(nDeleted, res.deletedCount);
+
+ verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted);
+}
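+
+// A minimal usage sketch (hypothetical test case, not run here): delete the single measurement
+// whose 'f' field equals 101 and verify that only the other document remains.
+//
+//     testDeleteOne({
+//         initialDocList: [doc1_a_nofields, doc2_a_f101],
+//         filter: {f: 101},
+//         expectedResultDocs: [doc1_a_nofields],
+//         nDeleted: 1,
+//     });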
+
+export function getBucketCollection(coll) {
+ return coll.getDB()[sysCollNamePrefix + coll.getName()];
+}
+
+/**
+ * Ensure the updateOne command operates correctly by examining documents after the update.
+ */
+export function testUpdateOne({
+ initialDocList,
+ updateQuery,
+ updateObj,
+ c,
+ resultDocList,
+ nMatched,
+ nModified = nMatched,
+ upsert = false,
+ upsertedDoc,
+ failCode
+}) {
+ const collName = getCallerName();
+ jsTestLog(`Running ${collName}(${tojson(arguments[0])})`);
+
+ const testDB = getTestDB();
+ const coll = testDB.getCollection(collName);
+ prepareCollection({collName, initialDocList});
+
+ let upd = {q: updateQuery, u: updateObj, multi: false, upsert: upsert};
+ if (c) {
+ upd["c"] = c;
+ upd["upsertSupplied"] = true;
+ }
+ const updateCommand = {
+ update: coll.getName(),
+ updates: [upd],
+ };
+
+ const res = failCode ? assert.commandFailedWithCode(testDB.runCommand(updateCommand), failCode)
+ : assert.commandWorked(testDB.runCommand(updateCommand));
+ if (!failCode) {
+ if (upsertedDoc) {
+ assert.eq(1, res.n, tojson(res));
+ assert.eq(0, res.nModified, tojson(res));
+ assert(res.hasOwnProperty("upserted"), tojson(res));
+ assert.eq(1, res.upserted.length);
+
+ if (upsertedDoc.hasOwnProperty("_id")) {
+ assert.eq(upsertedDoc._id, res.upserted[0]._id);
+ } else {
+ upsertedDoc["_id"] = res.upserted[0]._id;
+ }
+ resultDocList.push(upsertedDoc);
+ } else {
+ assert.eq(nMatched, res.n, tojson(res));
+ assert.eq(nModified, res.nModified, tojson(res));
+ assert(!res.hasOwnProperty("upserted"), tojson(res));
+ }
+ }
+
+ if (resultDocList) {
+ assert.sameMembers(resultDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update");
+ }
+}
+
+/**
+ * Verifies that a findAndModify remove returns the expected result(s) 'res'.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - cmd.filter: The filter for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ * fields of 'res' are ignored.
+ * - res.expectedResultDocs: The expected documents in the collection after the delete.
+ * - res.nDeleted: The expected number of documents deleted.
+ * - res.deletedDoc: The expected document returned by the findAndModify command.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nReturned: The expected number of documents returned by the TS_MODIFY stage.
+ */
+export function testFindOneAndRemove({
+ initialDocList,
+ cmd: {filter, fields, sort, collation},
+ res: {
+ errorCode,
+ expectedResultDocs,
+ nDeleted,
+ deletedDoc,
+ rootStage,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nReturned,
+ },
+}) {
+ const callerName = getCallerName();
+ jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`);
+
+ const coll = prepareCollection({collName: callerName, initialDocList: initialDocList});
+
+ const findAndModifyCmd = makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation);
+ jsTestLog(`Running findAndModify remove: ${tojson(findAndModifyCmd)}`);
+
+ const session = coll.getDB().getSession();
+ const shouldRetryWrites = session.getOptions().shouldRetryWrites();
+ // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'.
+ if (shouldRetryWrites) {
+ assert.commandFailedWithCode(
+ testDB.runCommand(findAndModifyCmd), 7308305, `cmd = ${tojson(findAndModifyCmd)}`);
+ return;
+ }
+
+ if (errorCode) {
+ assert.commandFailedWithCode(
+ testDB.runCommand(findAndModifyCmd), errorCode, `cmd = ${tojson(findAndModifyCmd)}`);
+ return;
+ }
+
+ if (bucketFilter !== undefined) {
+ const explainRes = assert.commandWorked(
+ coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"}));
+ verifyExplain({
+ explain: explainRes,
+ rootStageName: rootStage,
+ opType: "deleteOne",
+ bucketFilter: bucketFilter,
+ residualFilter: residualFilter,
+ nBucketsUnpacked: nBucketsUnpacked,
+ nReturned: nReturned,
+ });
+ }
+
+ const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+ jsTestLog(`findAndModify remove result: ${tojson(res)}`);
+ assert.eq(nDeleted, res.lastErrorObject.n, tojson(res));
+ if (deletedDoc) {
+ assert.docEq(deletedDoc, res.value, tojson(res));
+ } else if (nDeleted === 1) {
+ assert.neq(null, res.value, tojson(res));
+ } else if (nDeleted === 0) {
+ assert.eq(null, res.value, tojson(res));
+ }
+
+ verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted);
+}
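+
+// Illustrative usage sketch for testFindOneAndRemove(). Documents and field names are
+// hypothetical; only the 'cmd'/'res' shape matters:
+//
+//   const t = ISODate("2023-01-01T00:00:00Z");
+//   testFindOneAndRemove({
+//       initialDocList: [{_id: 0, time: t, tag: "A"}, {_id: 1, time: t, tag: "B"}],
+//       cmd: {filter: {tag: "A"}},
+//       res: {
+//           expectedResultDocs: [{_id: 1, time: t, tag: "B"}],
+//           nDeleted: 1,
+//           deletedDoc: {_id: 0, time: t, tag: "A"},
+//       },
+//   });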
+
+/**
+ * Verifies that a findAndModify update returns the expected result(s) 'res'.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - cmd.filter: The 'query' spec for the findAndModify command.
+ * - cmd.update: The 'update' spec for the findAndModify command.
+ * - cmd.returnNew: The 'new' option for the findAndModify command.
+ * - cmd.upsert: The 'upsert' option for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ * fields of 'res' are ignored.
+ * - res.resultDocList: The expected documents in the collection after the update.
+ * - res.nModified: The expected number of documents modified.
+ * - res.returnDoc: The expected document returned by the findAndModify command.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nMatched: The expected number of documents matched by the TS_MODIFY stage.
+ * - res.nModified: The expected number of documents modified by the TS_MODIFY stage.
+ * - res.nUpserted: The expected number of documents upserted by the TS_MODIFY stage.
+ */
+export function testFindOneAndUpdate({
+ initialDocList,
+ cmd: {filter, update, returnNew, upsert, fields, sort, collation},
+ res: {
+ errorCode,
+ resultDocList,
+ returnDoc,
+ rootStage,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nMatched,
+ nModified,
+ nUpserted,
+ },
+}) {
+ const collName = getCallerName();
+ jsTestLog(`Running ${collName}(${tojson(arguments[0])})`);
+
+ const testDB = getTestDB();
+ const coll = testDB.getCollection(collName);
+ prepareCollection({collName, initialDocList});
+
+ const findAndModifyCmd = makeFindOneAndUpdateCommand(
+ coll, filter, update, returnNew, upsert, fields, sort, collation);
+ jsTestLog(`Running findAndModify update: ${tojson(findAndModifyCmd)}`);
+
+ // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'.
+ if (coll.getDB().getSession().getOptions().shouldRetryWrites()) {
+ assert.commandFailedWithCode(testDB.runCommand(findAndModifyCmd), 7314600);
+ return;
+ }
+
+ if (errorCode) {
+ assert.commandFailedWithCode(testDB.runCommand(findAndModifyCmd), errorCode);
+ return;
+ }
+
+ if (bucketFilter !== undefined) {
+ const explainRes = assert.commandWorked(
+ coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"}));
+ verifyExplain({
+ explain: explainRes,
+ rootStageName: rootStage,
+ opType: "updateOne",
+ bucketFilter: bucketFilter,
+ residualFilter: residualFilter,
+ nBucketsUnpacked: nBucketsUnpacked,
+ nReturned: returnDoc ? 1 : 0,
+ nMatched: nMatched,
+ nModified: nModified,
+ nUpserted: nUpserted,
+ });
+ }
+
+ const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+ jsTestLog(`findAndModify update result: ${tojson(res)}`);
+ if (upsert) {
+ assert(nUpserted !== undefined && (nUpserted === 0 || nUpserted === 1),
+ "nUpserted must be 0 or 1");
+
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ if (returnNew !== undefined) {
+ assert(returnDoc, "returnDoc must be provided when 'upsert' is true");
+ assert.docEq(returnDoc, res.value, tojson(res));
+ }
+
+ if (nUpserted === 1) {
+ assert(res.lastErrorObject.upserted, `Expected upserted ObjectId: ${tojson(res)}`);
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ } else {
+ assert(!res.lastErrorObject.upserted, `Expected no upserted ObjectId: ${tojson(res)}`);
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ }
+ } else {
+ if (returnDoc !== undefined && returnDoc !== null) {
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.docEq(returnDoc, res.value, tojson(res));
+ } else {
+ assert.eq(0, res.lastErrorObject.n, tojson(res));
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.eq(null, res.value, tojson(res));
+ }
+ }
+
+ if (resultDocList !== undefined) {
+ assert.sameMembers(resultDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update");
+ }
+}
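+
+// Illustrative usage sketch for testFindOneAndUpdate(). Documents and field names are
+// hypothetical:
+//
+//   const t = ISODate("2023-01-01T00:00:00Z");
+//   testFindOneAndUpdate({
+//       initialDocList: [{_id: 0, time: t, tag: "A", value: 1}],
+//       cmd: {filter: {value: 1}, update: {$set: {value: 2}}, returnNew: true},
+//       res: {
+//           resultDocList: [{_id: 0, time: t, tag: "A", value: 2}],
+//           returnDoc: {_id: 0, time: t, tag: "A", value: 2},
+//       },
+//   });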
+
+export function getRelevantProfilerEntries(db, coll, requestType) {
+ const sysCollName = sysCollNamePrefix + coll.getName();
+ const profilerFilter = {
+ $or: [
+ // Potential two-phase protocol cluster query.
+ {
+ "op": "command",
+ "ns": `${db.getName()}.${sysCollName}`,
+ "command.aggregate": `${sysCollName}`,
+ "command.$_isClusterQueryWithoutShardKeyCmd": true,
+ // Filters out events recorded because of StaleConfig error.
+ "ok": {$ne: 0},
+ },
+ // Potential two-phase protocol write command.
+ {
+ "op": "command",
+ "ns": `${db.getName()}.${sysCollName}`,
+ [`command.${requestType}`]: `${sysCollName}`,
+ },
+ // Targeted write command.
+ {
+ "op": "command",
+ "ns": `${db.getName()}.${sysCollName}`,
+ [`command.${requestType}`]: `${coll.getName()}`,
+ }
+ ]
+ };
+ return db.system.profile.find(profilerFilter).toArray();
+}
+
+export function verifyThatRequestIsRoutedToCorrectShard(
+ coll, requestType, writeType, dataBearingShard) {
+ assert(primaryShard && otherShard, "The sharded cluster must be initialized");
+ assert(dataBearingShard === "primary" || dataBearingShard === "other" ||
+ dataBearingShard === "none" || dataBearingShard === "any",
+ "Invalid shard: " + dataBearingShard);
+ assert(writeType === "twoPhaseProtocol" || writeType === "targeted",
+ "Invalid write type: " + writeType);
+ assert(requestType === "findAndModify" || requestType === "delete" || requestType === "update",
+ "Invalid request type: " + requestType);
+
+ const primaryDB = primaryShard.getDB(testDB.getName());
+ const otherDB = otherShard.getDB(testDB.getName());
+
+ const primaryEntries = getRelevantProfilerEntries(primaryDB, coll, requestType);
+ const otherEntries = getRelevantProfilerEntries(otherDB, coll, requestType);
+
+ /*
+ * The profiler entries for the two-phase protocol are expected to be in the following order:
+ * On the data bearing shard:
+ * 1. Cluster query.
+ * 2. Targeted request.
+ *
+ * On the non-data bearing shard:
+ * 1. Cluster query.
+ *
+ * The profiler entries for the targeted write are expected to be in the following order:
+ * On the data bearing shard:
+ * 1. Targeted request.
+ */
+
+ if (dataBearingShard === "none") {
+ // If dataBearingShard is "none", the writeType must be "twoPhaseProtocol". So, no shards
+ // should get the targeted request after the cluster query for the case of "none".
+
+ assert.eq("twoPhaseProtocol",
+ writeType,
+ "Expected data bearing shard to be 'none' only for 'twoPhaseProtocol' mode");
+
+ assert.eq(1, primaryEntries.length, "Expected one profiler entry on primary shard");
+ // The entry must be for the cluster query.
+ assert(primaryEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(primaryEntries));
+
+ assert.eq(1, otherEntries.length, "Expected one profiler entry on other shard");
+ // The entry must be for the cluster query.
+ assert(otherEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(otherEntries));
+ return;
+ }
+
+ const [dataBearingShardEntries, nonDataBearingShardEntries] = (() => {
+ if (dataBearingShard === "any") {
+ assert.eq("twoPhaseProtocol",
+ writeType,
+ "Expected data bearing shard to be 'any' only for 'twoPhaseProtocol' mode");
+ return primaryEntries.length === 2 ? [primaryEntries, otherEntries]
+ : [otherEntries, primaryEntries];
+ }
+
+ return dataBearingShard === "primary" ? [primaryEntries, otherEntries]
+ : [otherEntries, primaryEntries];
+ })();
+
+ if (writeType === "twoPhaseProtocol") {
+ // At this point, we know that the data bearing shard is either primary or other. So, we
+ // expect two profiler entries on the data bearing shard and one on the non-data bearing
+ // shard.
+
+ assert.eq(
+ 2,
+ dataBearingShardEntries.length,
+ `Expected two profiler entries for data bearing shard in 'twoPhaseProtocol' mode but
+ got: ${tojson(dataBearingShardEntries)}`);
+ // The first entry must be for the cluster query.
+ assert(dataBearingShardEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+ // The second entry must be for the targeted write command.
+ assert(dataBearingShardEntries[1].command.hasOwnProperty(requestType),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+
+ assert.eq(
+ 1,
+ nonDataBearingShardEntries.length,
+ `Expected one profiler entry for non data bearing shard in 'twoPhaseProtocol' mode but
+ got: ${tojson(nonDataBearingShardEntries)}`);
+ // The first entry must be for the cluster query.
+ assert(nonDataBearingShardEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(nonDataBearingShardEntries));
+ } else {
+ // This is the targeted write case. So, we expect one profiler entry on the data bearing
+ // shard and none on the non-data bearing shard.
+
+ assert.eq(1, dataBearingShardEntries.length, tojson(dataBearingShardEntries));
+ // The single entry must be for the targeted write command.
+ assert(dataBearingShardEntries[0].command.hasOwnProperty(requestType),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+
+ assert.eq(0, nonDataBearingShardEntries.length, tojson(nonDataBearingShardEntries));
+ }
+}
+
+export function restartProfiler() {
+ assert(primaryShard && otherShard, "The sharded cluster must be initialized");
+
+ const primaryDB = primaryShard.getDB(testDB.getName());
+ const otherDB = otherShard.getDB(testDB.getName());
+
+ primaryDB.setProfilingLevel(0);
+ primaryDB.system.profile.drop();
+ primaryDB.setProfilingLevel(2);
+ otherDB.setProfilingLevel(0);
+ otherDB.system.profile.drop();
+ otherDB.setProfilingLevel(2);
+}
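+
+// Typical pattern for the profiler-based routing checks above (sketch): reset the profiler on
+// both shards, run the write, then verify which shard(s) received it.
+//
+//   restartProfiler();
+//   assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+//   verifyThatRequestIsRoutedToCorrectShard(coll, "findAndModify", "targeted", "primary");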
+
+/**
+ * Verifies that a findAndModify remove on a sharded timeseries collection returns the expected
+ * result(s) 'res'.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - cmd.filter: The filter for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ * fields of 'res' are ignored.
+ * - res.nDeleted: The expected number of documents deleted.
+ * - res.deletedDoc: The expected document returned by the findAndModify command.
+ * - res.writeType: "twoPhaseProtocol" or "targeted". On a sharded time-series collection, we can
+ *                  route the query to a specific shard only if it contains the shard key.
+ *                  "twoPhaseProtocol" means the query cannot be targeted to a specific
+ *                  data-bearing shard and must use the scatter-gather-like two-phase protocol,
+ *                  whereas "targeted" means it can be routed to a single shard.
+ * - res.dataBearingShard: "primary", "other", "none", or "any". For "none" and "any", only
+ * the "twoPhaseProtocol" is allowed.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nReturned: The expected number of documents returned by the TS_MODIFY stage.
+ */
+export function testFindOneAndRemoveOnShardedCollection({
+ initialDocList,
+ includeMeta = true,
+ cmd: {filter, fields, sort, collation},
+ res: {
+ errorCode,
+ nDeleted,
+ deletedDoc,
+ writeType,
+ dataBearingShard,
+ rootStage,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nReturned,
+ },
+}) {
+ const callerName = getCallerName();
+ jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`);
+
+ const coll = prepareShardedCollection(
+ {collName: callerName, initialDocList: initialDocList, includeMeta: includeMeta});
+
+ const findAndModifyCmd = makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation);
+ jsTestLog(`Running findAndModify remove: ${tojson(findAndModifyCmd)}`);
+
+ const session = coll.getDB().getSession();
+ const shouldRetryWrites = session.getOptions().shouldRetryWrites();
+ // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'.
+ if (!shouldRetryWrites && !errorCode) {
+ if (bucketFilter) {
+ // Due to a limitation of the two-phase write protocol, the TS_MODIFY stage's execution
+ // stats do not reflect the actual execution, so we only verify the plan shape for that
+ // write type.
+ assert(writeType !== "twoPhaseProtocol" || (!nBucketsUnpacked && !nReturned),
+ "Can't verify nBucketsUnpacked and nReturned for the two-phase protocol.");
+
+ const explainRes = assert.commandWorked(
+ coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"}));
+ verifyExplain({
+ explain: explainRes,
+ rootStageName: rootStage,
+ opType: "deleteOne",
+ bucketFilter: bucketFilter,
+ residualFilter: residualFilter,
+ nBucketsUnpacked: nBucketsUnpacked,
+ nReturned: nReturned,
+ });
+ }
+
+ restartProfiler();
+ const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+ jsTestLog(`findAndModify remove result: ${tojson(res)}`);
+ assert.eq(nDeleted, res.lastErrorObject.n, tojson(res));
+ let expectedResultDocs = initialDocList;
+ if (deletedDoc) {
+ // Note: To figure out the expected result documents, we need to know the _id of the
+ // deleted document.
+ assert(deletedDoc.hasOwnProperty("_id"),
+ `deletedDoc must have _id but got ${tojson(deletedDoc)}`);
+ assert.docEq(deletedDoc, res.value, tojson(res));
+ expectedResultDocs = initialDocList.filter(doc => doc._id !== deletedDoc._id);
+ } else if (nDeleted === 1) {
+ // Note: To figure out the expected result documents, we need to know the _id of the
+ // deleted document. And so we don't allow 'fields' to be specified because it might
+ // exclude _id field.
+ assert(!fields, `Must specify deletedDoc when fields are specified: ${tojson(fields)}`);
+ assert.neq(null, res.value, tojson(res));
+ expectedResultDocs = initialDocList.filter(doc => doc._id !== res.value._id);
+ } else if (nDeleted === 0) {
+ assert.eq(null, res.value, tojson(res));
+ }
+
+ verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted);
+ verifyThatRequestIsRoutedToCorrectShard(coll, "findAndModify", writeType, dataBearingShard);
+ } else if (errorCode) {
+ assert.commandFailedWithCode(
+ testDB.runCommand(findAndModifyCmd), errorCode, `cmd = ${tojson(findAndModifyCmd)}`);
+ } else {
+ // TODO SERVER-76583: Remove this test.
+ assert.commandFailedWithCode(
+ testDB.runCommand(findAndModifyCmd), 7308305, `cmd = ${tojson(findAndModifyCmd)}`);
+ }
+}
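+
+// Illustrative usage sketch for the sharded remove helper. Documents are hypothetical and the
+// 'writeType'/'dataBearingShard' values assume the filter can be targeted to the primary shard:
+//
+//   const t = ISODate("2023-01-01T00:00:00Z");
+//   testFindOneAndRemoveOnShardedCollection({
+//       initialDocList: [{_id: 0, time: t, tag: "A"}, {_id: 1, time: t, tag: "B"}],
+//       cmd: {filter: {tag: "A"}},
+//       res: {
+//           nDeleted: 1,
+//           deletedDoc: {_id: 0, time: t, tag: "A"},
+//           writeType: "targeted",
+//           dataBearingShard: "primary",
+//       },
+//   });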
+
+/**
+ * Verifies that a findAndModify update on a sharded timeseries collection returns the expected
+ * result(s) 'res'.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - cmd.filter: The 'query' spec for the findAndModify command.
+ * - cmd.update: The 'update' spec for the findAndModify command.
+ * - cmd.returnNew: The 'new' option for the findAndModify command.
+ * - cmd.upsert: The 'upsert' option for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ * fields of 'res' are ignored.
+ * - res.resultDocList: The expected documents in the collection after the update.
+ * - res.returnDoc: The expected document returned by the findAndModify command.
+ * - res.writeType: "twoPhaseProtocol" or "targeted". On a sharded time-series collection, we can
+ *                  route the query to a specific shard only if it contains the shard key.
+ *                  "twoPhaseProtocol" means the query cannot be targeted to a specific
+ *                  data-bearing shard and must use the scatter-gather-like two-phase protocol,
+ *                  whereas "targeted" means it can be routed to a single shard.
+ * - res.dataBearingShard: "primary", "other", "none", or "any". For "none" and "any", only
+ * the "twoPhaseProtocol" is allowed.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nMatched: The expected number of documents matched by the TS_MODIFY stage.
+ * - res.nModified: The expected number of documents modified by the TS_MODIFY stage.
+ * - res.nUpserted: The expected number of documents upserted by the TS_MODIFY stage.
+ */
+export function testFindOneAndUpdateOnShardedCollection({
+ initialDocList,
+ startTxn = false,
+ includeMeta = true,
+ cmd: {filter, update, returnNew, upsert, fields, sort, collation},
+ res: {
+ errorCode,
+ resultDocList,
+ returnDoc,
+ writeType,
+ dataBearingShard,
+ rootStage,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nMatched,
+ nModified,
+ nUpserted,
+ },
+}) {
+ const callerName = getCallerName();
+ jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`);
+
+ const coll = prepareShardedCollection(
+ {collName: callerName, initialDocList: initialDocList, includeMeta: includeMeta});
+
+ const findAndModifyCmd = makeFindOneAndUpdateCommand(
+ coll, filter, update, returnNew, upsert, fields, sort, collation);
+ jsTestLog(`Running findAndModify update: ${tojson(findAndModifyCmd)}`);
+
+ if (errorCode) {
+ assert.commandFailedWithCode(coll.runCommand(findAndModifyCmd), errorCode);
+ assert.sameMembers(initialDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update failure.");
+ return;
+ }
+
+ // Explain can't be run inside a transaction.
+ if (!startTxn && bucketFilter) {
+ // Due to a limitation of the two-phase write protocol, the TS_MODIFY stage's execution
+ // stats do not reflect the actual execution, so we only verify the plan shape for that
+ // write type.
+ assert(writeType !== "twoPhaseProtocol" ||
+ (nBucketsUnpacked === undefined && nMatched === undefined &&
+ nModified === undefined),
+ "Can't verify stats for the two-phase protocol.");
+
+ const explainRes = assert.commandWorked(
+ coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"}));
+ verifyExplain({
+ explain: explainRes,
+ rootStageName: rootStage,
+ opType: "updateOne",
+ bucketFilter: bucketFilter,
+ residualFilter: residualFilter,
+ nBucketsUnpacked: nBucketsUnpacked,
+ nReturned: returnDoc ? 1 : 0,
+ nMatched: nMatched,
+ nModified: nModified,
+ nUpserted: nUpserted,
+ });
+ }
+
+ restartProfiler();
+ const res = (() => {
+ if (!startTxn) {
+ return assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+ }
+
+ const session = coll.getDB().getMongo().startSession();
+ const sessionDb = session.getDatabase(coll.getDB().getName());
+ session.startTransaction();
+ const res = assert.commandWorked(sessionDb.runCommand(findAndModifyCmd));
+ session.commitTransaction();
+
+ return res;
+ })();
+ jsTestLog(`findAndModify update result: ${tojson(res)}`);
+ if (upsert) {
+ assert(nUpserted !== undefined && (nUpserted === 0 || nUpserted === 1),
+ "nUpserted must be 0 or 1");
+
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ if (returnNew !== undefined) {
+ assert(returnDoc, "returnDoc must be provided when 'upsert' is true");
+ assert.docEq(returnDoc, res.value, tojson(res));
+ }
+
+ if (nUpserted === 1) {
+ assert(res.lastErrorObject.upserted, `Expected upserted ObjectId: ${tojson(res)}`);
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ } else {
+ assert(!res.lastErrorObject.upserted, `Expected no upserted ObjectId: ${tojson(res)}`);
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ }
+ } else {
+ if (returnDoc !== undefined && returnDoc !== null) {
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.docEq(returnDoc, res.value, tojson(res));
+ } else {
+ assert.eq(0, res.lastErrorObject.n, tojson(res));
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.eq(null, res.value, tojson(res));
+ }
+ }
+
+ if (resultDocList !== undefined) {
+ assert.sameMembers(resultDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update");
+ }
+
+ verifyThatRequestIsRoutedToCorrectShard(coll, "findAndModify", writeType, dataBearingShard);
+}
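+
+// Illustrative usage sketch for the sharded update helper, here running inside a transaction
+// via 'startTxn'. Documents and expectations are hypothetical:
+//
+//   const t = ISODate("2023-01-01T00:00:00Z");
+//   testFindOneAndUpdateOnShardedCollection({
+//       initialDocList: [{_id: 0, time: t, tag: "A", value: 1}],
+//       startTxn: true,
+//       cmd: {filter: {value: 1}, update: {$set: {value: 2}}, returnNew: true},
+//       res: {
+//           resultDocList: [{_id: 0, time: t, tag: "A", value: 2}],
+//           returnDoc: {_id: 0, time: t, tag: "A", value: 2},
+//           writeType: "twoPhaseProtocol",
+//           dataBearingShard: "primary",
+//       },
+//   });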
+
+/**
+ * Sets up a sharded cluster. 'nMongos' is the number of mongos in the cluster.
+ */
+export function setUpShardedCluster({nMongos} = {
+ nMongos: 1
+}) {
+ assert.eq(null, st, "A sharded cluster must not be initialized yet");
+ assert.eq(null, primaryShard, "The primary shard must not be initialized yet");
+ assert.eq(null, otherShard, "The other shard must not be initialized yet");
+ assert.eq(null, testDB, "testDB must not be initialized yet");
+ assert.eq(null, mongos0DB, "mongos0DB must not be initialized yet");
+ assert.eq(null, mongos1DB, "mongos1DB must not be initialized yet");
+ assert(nMongos === 1 || nMongos === 2, "nMongos must be 1 or 2");
+
+ st = new ShardingTest({mongos: nMongos, shards: 2, rs: {nodes: 2}});
+
+ testDB = st.s.getDB(jsTestName());
+ assert.commandWorked(testDB.dropDatabase());
+ assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+ primaryShard = st.getPrimaryShard(testDB.getName());
+ st.ensurePrimaryShard(testDB.getName(), primaryShard.shardName);
+ otherShard = st.getOther(primaryShard);
+ mongos0DB = st.s0.getDB(testDB.getName());
+ if (nMongos > 1) {
+ mongos1DB = st.s1.getDB(testDB.getName());
+ }
+}
+
+/**
+ * Tears down the sharded cluster created by setUpShardedCluster().
+ */
+export function tearDownShardedCluster() {
+ assert.neq(null, st, "A sharded cluster must be initialized");
+ st.stop();
+}
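+
+// Typical lifecycle for a test file that uses the sharded helpers (sketch; the middle call is
+// illustrative):
+//
+//   setUpShardedCluster({nMongos: 2});
+//   // ... individual test*OnShardedCollection() cases ...
+//   tearDownShardedCluster();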
diff --git a/jstests/core/timeseries/nondefault_collation.js b/jstests/core/timeseries/nondefault_collation.js
index f101ed4a323fb..d4686a448d62d 100644
--- a/jstests/core/timeseries/nondefault_collation.js
+++ b/jstests/core/timeseries/nondefault_collation.js
@@ -1,6 +1,10 @@
/**
- * Test ensures that users can specify non-default collation when querying on time-series
- * collections.
+ * Correctness tests for time-series collections whose default collation might not match the
+ * explicit collation specified in the query.
+ *
+ * Queries on time-series collections attempt various optimizations to avoid unpacking buckets.
+ * These rely on the meta field and the control data (currently, min and max) computed for each
+ * bucket. The collection's collation might affect the computed control values.
*
* @tags: [
* # TODO (SERVER-73322): remove
@@ -15,131 +19,195 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_nondefault_collation;
const bucketsColl = db.getCollection('system.buckets.' + coll.getName());
-coll.drop(); // implicitly drops bucketsColl.
-
-const timeFieldName = 'time';
-const metaFieldName = 'meta';
-
const numericOrdering = {
- collation: {locale: "en_US", numericOrdering: true}
+ locale: "en_US",
+ numericOrdering: true,
+ strength: 1 // case and diacritics ignored
};
-
const caseSensitive = {
- collation: {locale: "en_US", strength: 1, caseLevel: true, numericOrdering: true}
+ locale: "en_US",
+ strength: 1,
+ caseLevel: true
};
-
const diacriticSensitive = {
- collation: {locale: "en_US", strength: 2}
+ locale: "en_US",
+ strength: 2,
+ caseLevel: false
};
-
-const englishCollation = {
- locale: 'en',
+const insensitive = {
+ locale: "en_US",
strength: 1
};
-const simpleCollation = {
- locale: "simple"
-};
+// A find on the meta field is no different from a find on any other view, but check it anyway.
+(function testFind_MetaField() {
+ coll.drop();
-assert.commandWorked(db.createCollection(coll.getName(), {
- timeseries: {timeField: timeFieldName, metaField: metaFieldName},
- collation: englishCollation
-}));
-assert.contains(bucketsColl.getName(), db.getCollectionNames());
-
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "1", name: 'A', name2: "á"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'a', name2: "á"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'A', name2: "á"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "10", name: 'a', name2: "á"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "20", name: 'A', name2: "a"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "50", name: 'B', name2: "a"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "100", name: 'b', name2: "a"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "200", name: 'B', name2: "a"}));
-assert.commandWorked(
- coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "500", name: 'b', name2: "a"}));
-
-// Default collation is case and diacretic insensitive.
-assert.eq(2, coll.aggregate([{$sortByCount: "$name"}]).itcount());
-assert.eq(1, coll.aggregate([{$sortByCount: "$name2"}]).itcount());
-
-// Test that a explicit collation different from collection's default passes for a timeseries
-// collection.
-let results =
- coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "100", "1000"]}}],
- numericOrdering)
- .toArray();
-assert.eq(3, results.length);
-assert.eq({_id: "1", count: 3}, results[0]);
-assert.eq({_id: "10", count: 3}, results[1]);
-assert.eq({_id: "100", count: 3}, results[2]);
-
-assert.eq(4, coll.aggregate([{$sortByCount: "$name"}], caseSensitive).itcount());
-assert.eq(2, coll.aggregate([{$sortByCount: "$name2"}], diacriticSensitive).itcount());
-
-coll.drop();
-const defaultCollation = {
- locale: "en",
- numericOrdering: true,
- caseLevel: true,
- strength: 2
-};
-assert.commandWorked(db.createCollection(coll.getName(), {
- timeseries: {timeField: timeFieldName, metaField: metaFieldName},
- collation: defaultCollation
-}));
-assert.contains(bucketsColl.getName(), db.getCollectionNames());
-assert.commandWorked(coll.createIndex({[metaFieldName]: 1}, {collation: {locale: "simple"}}));
-
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'A', name2: "á", value: "1"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: 2, name: 'a', name2: "á", value: "11"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'A', name2: "á", value: "50"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'a', name2: "á", value: "100"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'A', name2: "a", value: "3"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'B', name2: "a", value: "-100"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: "1", name: 'b', name2: "a", value: "-200"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'B', name2: "a", value: "1000"}));
-assert.commandWorked(coll.insert(
- {[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'b', name2: "a", value: "4"}));
-
-// This collection has been created using non simple collation. The collection was then indexed on
-// its metadata using simple collation. These tests confirm that queries on the indexed field using
-// nondefault (simple) collation use the index. They also confirm that queries that don't involve
-// strings but do use default collation, on indexed fields, also use the index.
-const nonDefaultCollationQuery = coll.find({meta: 2}).collation(englishCollation).explain();
-assert(aggPlanHasStage(nonDefaultCollationQuery, "IXSCAN"), nonDefaultCollationQuery);
-
-const simpleNonDefaultCollationQuery = coll.find({meta: 2}).collation(simpleCollation).explain();
-assert(aggPlanHasStage(simpleNonDefaultCollationQuery, "IXSCAN"), simpleNonDefaultCollationQuery);
-
-const defaultCollationQuery = coll.find({meta: 1}).collation(defaultCollation).explain();
-assert(aggPlanHasStage(defaultCollationQuery, "IXSCAN"), defaultCollationQuery);
-
-// This test guarantees that the bucket's min/max matches the query's min/max regardless of
-// collation.
-results = coll.find({value: {$gt: "4"}}).collation(simpleCollation);
-assert.eq(1, results.itcount());
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "1", value: 42}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "10", value: 42}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "5", value: 42}));
+
+ // Use the collection's collation with numeric ordering.
+ let res1 = coll.find({meta: {$gt: "4"}});
+ assert.eq(2, res1.itcount(), res1.toArray()); // should match "5" and "10"
+
+ // Use explicit collation with lexicographic ordering.
+ let res2 = coll.find({meta: {$gt: "4"}}).collation(insensitive);
+ assert.eq(1, res2.itcount(), res2.toArray()); // should match only "5"
+}());
+
+// For measurement fields, each bucket computes additional "control values", such as min/max,
+// and queries might use them to avoid unpacking.
+(function testFind_MeasurementField() {
+ coll.drop();
+
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ // The 'numericOrdering' on the collection means that the max of the bucket with the three docs
+ // below is "10" (while the lexicographic max is "5").
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "1"}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "10"}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "5"}));
+
+ // A query with default collation would use the bucket's min/max and find the matches. We are
+ // not checking the unpacking optimizations here as it's not a concern of collation per se.
+ let res1 = coll.find({value: {$gt: "4"}});
+ assert.eq(2, res1.itcount(), res1.toArray()); // should match "5" and "10"
+
+ // If a query with the 'insensitive' collation, which doesn't do numeric ordering, used the
+ // bucket's min/max, it would miss the bucket. Check that it doesn't.
+ let res2 = coll.find({value: {$gt: "4"}}).collation(insensitive);
+ assert.eq(1, res2.itcount(), res2.toArray()); // should match only "5"
+}());
+
+(function testAgg_GroupByMetaField() {
+ coll.drop();
+
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "1", val: 1}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "5", val: 1}));
+
+ // Using collection's collation with numeric ordering.
+ let res1 =
+ coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "50"]}}]).toArray();
+ assert.eq(1, res1.length);
+ assert.eq({_id: "1", count: 2}, res1[0]);
+
+ // Using explicit collation with lexicographic ordering.
+ let res2 = coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "50"]}}],
+ {collation: insensitive})
+ .toArray();
+ assert.eq(2, res2.length);
+ assert.eq({_id: "1", count: 1}, res2[0]); // "1" goes here
+ assert.eq({_id: "10", count: 1}, res2[1]); // "5" goes here
+}());
+
+(function testAgg_GroupByMeasurementField() {
+ coll.drop();
+
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: insensitive}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ // Cause two different buckets with various case/diacritics in each for the measurement 'name'.
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'A'}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'a'}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'á'}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'A'}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'a'}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'ä'}));
+
+ // Test with the collection's collation, which is case and diacritic insensitive.
+ assert.eq(1, coll.aggregate([{$sortByCount: "$name"}]).itcount());
+
+ // Test with explicit collation that is different from the collection's.
+ assert.eq(2, coll.aggregate([{$sortByCount: "$name"}], {collation: caseSensitive}).itcount());
+ assert.eq(3,
+ coll.aggregate([{$sortByCount: "$name"}], {collation: diacriticSensitive}).itcount());
+}());
+
+// For $group queries that would put whole buckets into the same group, it might be possible to
+// avoid unpacking if the information the group is computing is exposed in the control data of each
+// bucket. Currently, we only do this optimization for min/max with the meta as the group key.
+(function testAgg_MinMaxOptimization() {
+ coll.drop();
+
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ // These two docs will be placed into the same bucket, and the max for the bucket will be
+ // computed using the collection's collation, that is, it should be "10".
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42, val: "10"}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42, val: "5"}));
+
+ // Sanity-check the bucketing first, as otherwise the assertions below wouldn't be testing
+ // what we think they are.
+ let buckets = bucketsColl.find().toArray();
+ assert.eq(1, buckets.length, "All docs should be placed into the same bucket");
+ assert.eq("10", buckets[0].control.max.val, "Computed max control for 'val' measurement");
+
+ // Use the collection's collation with numeric ordering.
+ let res1 = coll.aggregate([{$group: {_id: "$meta", v: {$max: "$val"}}}]).toArray();
+ assert.eq("10", res1[0].v, "max val in numeric ordering per the collection's collation");
+
+ // Use an explicit query collation with lexicographic ordering.
+ let res2 =
+ coll.aggregate([{$group: {_id: "$meta", v: {$max: "$val"}}}], {collation: insensitive})
+ .toArray();
+ assert.eq("5", res2[0].v, "max val in lexicographic ordering per the query collation");
}());
+
+(function testFind_IndexWithDifferentCollation() {
+ coll.drop();
+
+ assert.commandWorked(db.createCollection(
+ coll.getName(),
+ {timeseries: {timeField: 'time', metaField: 'meta'}, collation: diacriticSensitive}));
+ assert.contains(bucketsColl.getName(), db.getCollectionNames());
+
+ // Create index with a different collation.
+ assert.commandWorked(coll.createIndex({meta: 1}, {collation: insensitive}));
+
+ // We only check that the correct plan is chosen so the contents of the collection don't matter
+ // as long as it's not empty.
+ assert.commandWorked(coll.insert({time: ISODate(), meta: 42}));
+ assert.commandWorked(coll.insert({time: ISODate(), meta: "the answer"}));
+
+ // Queries that don't specify an explicit collation use the collection's default collation,
+ // which isn't compatible with the index, so the index should NOT be used.
+ let query = coll.find({meta: "str"}).explain();
+ assert(!aggPlanHasStage(query, "IXSCAN"), query);
+
+ // Queries with an explicit collation that isn't compatible with the index should NOT use an
+ // index scan either.
+ query = coll.find({meta: "str"}).collation(caseSensitive).explain();
+ assert(!aggPlanHasStage(query, "IXSCAN"), query);
+
+ // Queries with the same collation as in the index, should do index scan.
+ query = coll.find({meta: "str"}).collation(insensitive).explain();
+ assert(aggPlanHasStage(query, "IXSCAN"), query);
+
+ // Numeric queries that don't rely on collation should use an index scan.
+ query = coll.find({meta: 1}).explain();
+ assert(aggPlanHasStage(query, "IXSCAN"), query);
+}());
\ No newline at end of file
diff --git a/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js b/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js
index d0a94ff3affad..50e20a5b6555c 100644
--- a/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js
+++ b/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js
@@ -11,12 +11,11 @@
* ]
*/
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/fixture_helpers.js"); // For isSharded.
load('jstests/noPassthrough/libs/index_build.js');
-(function() {
if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
const timeFieldName = "timestamp";
@@ -183,4 +182,3 @@ if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
.explain());
assert(isCollscan(db, getWinningPlan(findAndExplain.queryPlanner)));
}
-})();
diff --git a/jstests/core/timeseries/timeseries_bucket_index.js b/jstests/core/timeseries/timeseries_bucket_index.js
index 1a816b7c84d1a..093302190892c 100644
--- a/jstests/core/timeseries/timeseries_bucket_index.js
+++ b/jstests/core/timeseries/timeseries_bucket_index.js
@@ -9,11 +9,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js"); // For 'planHasStage' helper.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {planHasStage} from "jstests/libs/analyze_plan.js";
TimeseriesTest.run((insert) => {
const coll = db.timeseries_bucket_index;
@@ -54,4 +51,3 @@ TimeseriesTest.run((insert) => {
assert.commandWorked(bucketsColl.remove({_id: bucketId}));
assert.docEq([], bucketsColl.find().toArray());
});
-})();
diff --git a/jstests/core/timeseries/timeseries_bucket_limit_count.js b/jstests/core/timeseries/timeseries_bucket_limit_count.js
index a6942d3f42cda..c60d540d662de 100644
--- a/jstests/core/timeseries/timeseries_bucket_limit_count.js
+++ b/jstests/core/timeseries/timeseries_bucket_limit_count.js
@@ -9,10 +9,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
TimeseriesTest.run((insert) => {
const collNamePrefix = 'timeseries_bucket_limit_count_';
@@ -70,9 +68,15 @@ TimeseriesTest.run((insert) => {
assert.eq(bucketMaxCount - 1,
bucketDocs[0].control.max.x,
'invalid control.max for x in first bucket: ' + tojson(bucketDocs));
- assert.eq(2,
- bucketDocs[0].control.version,
- 'unexpected control.version in first bucket: ' + tojson(bucketDocs));
+ if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ assert.eq(1,
+ bucketDocs[0].control.version,
+ 'unexpected control.version in first bucket: ' + tojson(bucketDocs));
+ } else {
+ assert.eq(2,
+ bucketDocs[0].control.version,
+ 'unexpected control.version in first bucket: ' + tojson(bucketDocs));
+ }
assert(!bucketDocs[0].control.hasOwnProperty("closed"),
'unexpected control.closed in first bucket: ' + tojson(bucketDocs));
@@ -100,4 +104,3 @@ TimeseriesTest.run((insert) => {
runTest(numDocs / 2);
runTest(numDocs);
});
-})();
diff --git a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
index 23454d6068e97..2509ad01ca21c 100644
--- a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
+++ b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
@@ -8,10 +8,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const isTimeseriesScalabilityImprovementsEnabled =
@@ -124,5 +121,4 @@ TimeseriesTest.run((insert) => {
runTest(1);
runTest(numDocs);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_bucket_manual_removal.js b/jstests/core/timeseries/timeseries_bucket_manual_removal.js
index 698aed1104fa7..dd439bfff0d2a 100644
--- a/jstests/core/timeseries/timeseries_bucket_manual_removal.js
+++ b/jstests/core/timeseries/timeseries_bucket_manual_removal.js
@@ -10,10 +10,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const coll = db.timeseries_bucket_manual_removal;
@@ -68,5 +65,4 @@ TimeseriesTest.run((insert) => {
buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 1, 'Expected one bucket but found ' + tojson(buckets));
assert.neq(buckets[0]._id, bucketId);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_bucket_rename.js b/jstests/core/timeseries/timeseries_bucket_rename.js
deleted file mode 100644
index d219d10480211..0000000000000
--- a/jstests/core/timeseries/timeseries_bucket_rename.js
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Tests that a system.buckets collection cannot be renamed.
- *
- * @tags: [
- * # We need a timeseries collection.
- * requires_timeseries,
- * ]
- */
-(function() {
-'use strict';
-
-const coll = db.timeseries_bucket_rename;
-const bucketsColl = db.getCollection('system.buckets.' + coll.getName());
-
-const timeFieldName = 'time';
-
-coll.drop();
-assert.commandWorked(db.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
-assert.contains(bucketsColl.getName(), db.getCollectionNames());
-
-assert.commandFailedWithCode(db.adminCommand({
- renameCollection: bucketsColl.getFullName(),
- to: db.getName() + ".otherColl",
- dropTarget: false
-}),
- ErrorCodes.IllegalOperation);
-})();
diff --git a/jstests/core/timeseries/timeseries_collation.js b/jstests/core/timeseries/timeseries_collation.js
index 76ef7fca54a6f..f86402953dea5 100644
--- a/jstests/core/timeseries/timeseries_collation.js
+++ b/jstests/core/timeseries/timeseries_collation.js
@@ -9,10 +9,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/core/timeseries/libs/timeseries.js');
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const coll = db.timeseries_collation;
@@ -92,5 +89,4 @@ TimeseriesTest.run((insert) => {
assert.eq(buckets[2].control.min.y, null);
assert.eq(buckets[2].control.max.x, null);
assert.eq(buckets[2].control.max.y, null);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_collmod.js b/jstests/core/timeseries/timeseries_collmod.js
index 18ae48a421f27..503b100697914 100644
--- a/jstests/core/timeseries/timeseries_collmod.js
+++ b/jstests/core/timeseries/timeseries_collmod.js
@@ -11,10 +11,7 @@
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const collName = "timeseries_collmod";
const coll = db.getCollection(collName);
@@ -214,5 +211,4 @@ if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) {
// No-op command should succeed with empty time-series options.
assert.commandWorked(db.runCommand({"collMod": collName, "timeseries": {}}));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_compact.js b/jstests/core/timeseries/timeseries_compact.js
index b5a3b22f1d7d6..549f3d801131d 100644
--- a/jstests/core/timeseries/timeseries_compact.js
+++ b/jstests/core/timeseries/timeseries_compact.js
@@ -13,10 +13,7 @@
* uses_compact,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run(() => {
const coll = db.timeseries_compact;
@@ -33,5 +30,4 @@ TimeseriesTest.run(() => {
assert.commandWorked(db.runCommand({compact: coll.getName(), force: true}));
assert.commandWorked(db.runCommand({compact: "system.buckets." + coll.getName(), force: true}));
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_create_collection.js b/jstests/core/timeseries/timeseries_create_collection.js
index 04d37eff5c09f..0cea8021481b0 100644
--- a/jstests/core/timeseries/timeseries_create_collection.js
+++ b/jstests/core/timeseries/timeseries_create_collection.js
@@ -19,6 +19,15 @@ assert.commandWorked(testDB.dropDatabase());
const timeFieldName = 'time';
const coll = testDB.t;
+// Fails to create a time-series collection with null-embedded timeField or metaField.
+assert.commandFailedWithCode(
+ testDB.createCollection(coll.getName(), {timeseries: {timeField: '\0time'}}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(
+ testDB.createCollection(coll.getName(),
+ {timeseries: {timeField: timeFieldName, metaField: 't\0ag'}}),
+ ErrorCodes.BadValue);
+
// Create a timeseries collection, listCollection should show view and bucket collection
assert.commandWorked(
testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
diff --git a/jstests/core/timeseries/timeseries_create_invalid_view.js b/jstests/core/timeseries/timeseries_create_invalid_view.js
deleted file mode 100644
index 54254e8c3cada..0000000000000
--- a/jstests/core/timeseries/timeseries_create_invalid_view.js
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Verify we cannot create a view on a system.buckets collection.
- *
- * @tags: [
- * # This restriction was added in 6.1.
- * requires_fcv_61,
- * # We need a timeseries collection.
- * requires_timeseries,
- * ]
- */
-(function() {
-'use strict';
-
-const testDB = db.getSiblingDB(jsTestName());
-assert.commandWorked(testDB.dropDatabase());
-
-const timeFieldName = 'time';
-const coll = testDB.t;
-
-// Create a timeseries collection, listCollection should show view and bucket collection
-assert.commandWorked(
- testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
-let collections = assert.commandWorked(testDB.runCommand({listCollections: 1})).cursor.firstBatch;
-jsTestLog('Checking listCollections result: ' + tojson(collections));
-assert(collections.find(entry => entry.name === 'system.buckets.' + coll.getName()));
-assert(collections.find(entry => entry.name === coll.getName()));
-
-// Ensure we cannot create a view on a system.buckets collection.
-assert.commandFailedWithCode(testDB.createView("badView", "system.buckets." + coll.getName(), []),
- ErrorCodes.InvalidNamespace);
-})();
diff --git a/jstests/core/timeseries/timeseries_delete_collation.js b/jstests/core/timeseries/timeseries_delete_collation.js
index 913654b63439f..b808f21e52408 100644
--- a/jstests/core/timeseries/timeseries_delete_collation.js
+++ b/jstests/core/timeseries/timeseries_delete_collation.js
@@ -6,12 +6,13 @@
* # We need a timeseries collection.
* requires_timeseries,
* requires_non_retryable_writes,
- * requires_fcv_70,
+ * requires_fcv_71,
* ]
*/
-(function() {
-"use strict";
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
+
+load("jstests/libs/fixture_helpers.js"); // For 'isMongos'
const timeFieldName = "time";
const metaFieldName = "tag";
@@ -33,6 +34,9 @@ const caseInsensitive = {
const simple = {
locale: "simple"
};
+const closedBucketFilter = {
+ "control.closed": {"$not": {"$eq": true}}
+};
const docs = [
{_id: 0, [timeFieldName]: dateTime, [metaFieldName]: "A", str: "HELLO"},
@@ -50,9 +54,22 @@ const docs = [
];
/**
- * Confirms that a set of deletes returns the expected set of documents.
+ * Confirms that a set of deletes returns the expected set of documents and runs the correct delete
+ * stage and bucket query.
*/
-function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) {
+function runTest({
+ deleteFilter,
+ queryCollation,
+ collectionCollation,
+ nDeleted,
+ expectedBucketQuery,
+ expectedDeleteStage
+}) {
+ jsTestLog(`Running ${tojson(deleteFilter)} with queryCollation: ${
+ tojson(queryCollation)} and collectionCollation: ${tojson(collectionCollation)}`);
+
+ assert(expectedDeleteStage === "TS_MODIFY" || expectedDeleteStage === "DELETE");
+
const coll = testDB.getCollection(collNamePrefix + testCaseId++);
assert.commandWorked(testDB.createCollection(coll.getName(), {
timeseries: {timeField: timeFieldName, metaField: metaFieldName},
@@ -60,40 +77,186 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted})
}));
assert.commandWorked(coll.insert(docs));
- const res = assert.commandWorked(coll.deleteMany(deleteFilter, {collation: queryCollation}));
- assert.eq(nDeleted, res.deletedCount);
+ const deleteCommand = {
+ delete: coll.getName(),
+ deletes: [{q: deleteFilter, limit: 0, collation: queryCollation}]
+ };
+ const explain = testDB.runCommand({explain: deleteCommand, verbosity: "queryPlanner"});
+ const parsedQuery = FixtureHelpers.isMongos(testDB)
+ ? explain.queryPlanner.winningPlan.shards[0].parsedQuery
+ : explain.queryPlanner.parsedQuery;
+
+ assert.eq(expectedBucketQuery, parsedQuery, `Got wrong parsedQuery: ${tojson(explain)}`);
+ assert.neq(null,
+ getPlanStage(explain.queryPlanner.winningPlan, expectedDeleteStage),
+ `${expectedDeleteStage} stage not found in the plan: ${tojson(explain)}`);
+
+ const res = assert.commandWorked(testDB.runCommand(deleteCommand));
+ assert.eq(nDeleted, res.n);
}
(function testNoCollation() {
// Residual filter.
- runTest({deleteFilter: {str: "Hello"}, nDeleted: 0});
- runTest({deleteFilter: {str: "hello"}, nDeleted: 3});
+ runTest({
+ deleteFilter: {str: "Hello"},
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {str: "hello"},
+ nDeleted: 3,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "hello"}},
+ {"control.min.str": {$_internalExprLte: "hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
// Bucket filter.
- runTest({deleteFilter: {[metaFieldName]: "a"}, nDeleted: 0});
- runTest({deleteFilter: {[metaFieldName]: "A"}, nDeleted: 4});
+ runTest({
+ deleteFilter: {[metaFieldName]: "a"},
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "A"},
+ nDeleted: 4,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "A"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
})();
(function testQueryLevelCollation() {
// Residual filter.
- runTest({deleteFilter: {str: "Hello"}, queryCollation: caseSensitive, nDeleted: 0});
- runTest({deleteFilter: {str: "Hello"}, queryCollation: caseInsensitive, nDeleted: 6});
+ runTest({
+ deleteFilter: {str: "Hello"},
+ queryCollation: caseSensitive,
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {str: "Hello"},
+ queryCollation: caseInsensitive,
+ nDeleted: 6,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
// Bucket filter.
- runTest({deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseSensitive, nDeleted: 0});
- runTest({deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseInsensitive, nDeleted: 4});
+ runTest({
+ deleteFilter: {[metaFieldName]: "a"},
+ queryCollation: caseSensitive,
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "a"},
+ queryCollation: caseInsensitive,
+ nDeleted: 4,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
})();
(function testCollectionLevelCollation() {
// Residual filter.
- runTest({deleteFilter: {str: "Hello"}, collectionCollation: caseSensitive, nDeleted: 0});
- runTest({deleteFilter: {str: "Hello"}, collectionCollation: caseInsensitive, nDeleted: 6});
+ runTest({
+ deleteFilter: {str: "Hello"},
+ collectionCollation: caseSensitive,
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {str: "Hello"},
+ collectionCollation: caseInsensitive,
+ nDeleted: 6,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
// Bucket filter.
- runTest(
- {deleteFilter: {[metaFieldName]: "a"}, collectionCollation: caseSensitive, nDeleted: 0});
- runTest(
- {deleteFilter: {[metaFieldName]: "a"}, collectionCollation: caseInsensitive, nDeleted: 4});
+ runTest({
+ deleteFilter: {[metaFieldName]: "a"},
+ collectionCollation: caseSensitive,
+ nDeleted: 0,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "a"},
+ collectionCollation: caseInsensitive,
+ nDeleted: 4,
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
+ });
})();
(function testQueryLevelCollationOverridesDefault() {
@@ -102,13 +265,40 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted})
deleteFilter: {str: "Hello"},
queryCollation: caseInsensitive,
collectionCollation: caseInsensitive,
- nDeleted: 6
+ nDeleted: 6,
+ expectedBucketQuery: {
+ $and: [
+ closedBucketFilter,
+ {"control.max.str": {$_internalExprGte: "Hello"}},
+ {"control.min.str": {$_internalExprLte: "Hello"}}
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
});
runTest({
deleteFilter: {str: "Hello"},
queryCollation: caseInsensitive,
collectionCollation: caseSensitive,
- nDeleted: 6
+ nDeleted: 6,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: closedBucketFilter,
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "A", str: "Hello"},
+ queryCollation: caseInsensitive,
+ collectionCollation: caseSensitive,
+ nDeleted: 2,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "A"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
});
// Bucket filter.
@@ -116,13 +306,29 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted})
deleteFilter: {[metaFieldName]: "a"},
queryCollation: caseInsensitive,
collectionCollation: caseInsensitive,
- nDeleted: 4
+ nDeleted: 4,
+ // We can push down bucket filter for DELETE stage with the query level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
});
runTest({
deleteFilter: {[metaFieldName]: "a"},
queryCollation: caseInsensitive,
collectionCollation: caseSensitive,
- nDeleted: 4
+ nDeleted: 4,
+ // We can push down bucket filter for DELETE stage with the query level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
});
})();
@@ -132,13 +338,51 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted})
deleteFilter: {str: "Hello"},
queryCollation: simple,
collectionCollation: caseInsensitive,
- nDeleted: 0
+ nDeleted: 0,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: closedBucketFilter,
+ expectedDeleteStage: "TS_MODIFY"
});
runTest({
deleteFilter: {str: "hello"},
queryCollation: simple,
collectionCollation: caseInsensitive,
- nDeleted: 3
+ nDeleted: 3,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: closedBucketFilter,
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "a", str: "hello"},
+ queryCollation: simple,
+ collectionCollation: caseInsensitive,
+ nDeleted: 0,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
+ });
+ runTest({
+ deleteFilter: {[metaFieldName]: "A", str: "HELLO"},
+ queryCollation: simple,
+ collectionCollation: caseInsensitive,
+ nDeleted: 1,
+ // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level
+ // collation overrides the collection level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "A"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "TS_MODIFY"
});
// Bucket filter.
@@ -146,13 +390,28 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted})
deleteFilter: {[metaFieldName]: "a"},
queryCollation: simple,
collectionCollation: caseInsensitive,
- nDeleted: 0
+ nDeleted: 0,
+ // We can push down bucket filter for DELETE stage with the query level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "a"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
});
runTest({
deleteFilter: {[metaFieldName]: "A"},
queryCollation: simple,
collectionCollation: caseInsensitive,
- nDeleted: 4
+ nDeleted: 4,
+ // We can push down bucket filter for DELETE stage with the query level collation.
+ expectedBucketQuery: {
+ $and: [
+ {meta: {$eq: "A"}},
+ closedBucketFilter,
+ ]
+ },
+ expectedDeleteStage: "DELETE"
});
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
index 0b290d9240113..d159e70cf8d23 100644
--- a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
+++ b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
@@ -10,8 +10,13 @@
* ]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+// TODO SERVER-77454: Investigate re-enabling this.
+if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ jsTestLog("Skipping test as the always use compressed buckets feature is enabled");
+ quit();
+}
const timeFieldName = "time";
const metaFieldName = "tag";
@@ -74,5 +79,4 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) {
assert.eq(coll.countDocuments({f: {$lt: 100}}),
100 - 50 - 1, // 100 records to start + 50 deleted above + 1 more deleted
"Expected records matching the filter to be deleted.");
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_delete_hint.js b/jstests/core/timeseries/timeseries_delete_hint.js
index 187173f48d412..0e8b47216047c 100644
--- a/jstests/core/timeseries/timeseries_delete_hint.js
+++ b/jstests/core/timeseries/timeseries_delete_hint.js
@@ -17,11 +17,7 @@
* uses_parallel_shell,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/curop_helpers.js");
-load("jstests/libs/feature_flag_util.js");
load('jstests/libs/parallel_shell_helpers.js');
const timeFieldName = "time";
@@ -68,7 +64,7 @@ const validateDeleteIndex = (docsToInsert,
: assert.commandWorked(
testDB.runCommand({delete: coll.getName(), deletes: deleteQuery}));
assert.eq(res["n"], expectedNRemoved);
- assert.docEq(expectedRemainingDocs, coll.find({}, {_id: 0}).toArray());
+ assert.sameMembers(expectedRemainingDocs, coll.find({}, {_id: 0}).toArray());
assert(coll.drop());
},
docsToInsert,
@@ -207,5 +203,4 @@ validateDeleteIndex([objA, objB, objC],
[{q: {[metaFieldName]: {c: "C"}}, limit: 0, hint: {"test_hint": 1}}],
[{[metaFieldName]: -1}, {[timeFieldName]: 1}],
"IXSCAN { control.min.time: 1, control.max.time: 1 }",
- {expectedErrorCode: ErrorCodes.BadValue});
-})();
+ {expectedErrorCode: ErrorCodes.BadValue});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_delete_one.js b/jstests/core/timeseries/timeseries_delete_one.js
index 39ad152a7d215..deeab37afb791 100644
--- a/jstests/core/timeseries/timeseries_delete_one.js
+++ b/jstests/core/timeseries/timeseries_delete_one.js
@@ -4,97 +4,24 @@
* @tags: [
* # We need a timeseries collection.
* requires_timeseries,
- * featureFlagUpdateOneWithoutShardKey,
+ * requires_fcv_71
* ]
*/
-(function() {
-"use strict";
-
-const timeFieldName = "time";
-const metaFieldName = "tag";
-const dateTime = ISODate("2021-07-12T16:00:00Z");
-const collNamePrefix = "timeseries_delete_one_";
-let testCaseId = 0;
-
-const testDB = db.getSiblingDB(jsTestName());
-assert.commandWorked(testDB.dropDatabase());
-
-/**
- * Confirms that a deleteOne() returns the expected set of documents.
- */
-function testDeleteOne({initialDocList, filter, expectedResultDocs, nDeleted}) {
- const coll = testDB.getCollection(collNamePrefix + testCaseId++);
- assert.commandWorked(testDB.createCollection(
- coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
-
- assert.commandWorked(coll.insert(initialDocList));
-
- const res = assert.commandWorked(coll.deleteOne(filter));
- assert.eq(nDeleted, res.deletedCount);
-
- const resultDocs = coll.find().toArray();
- assert.eq(resultDocs.length, initialDocList.length - nDeleted, tojson(resultDocs));
-
- // Validate the collection's exact contents if we were given the expected results. We may skip
- // this step in some cases, if the delete doesn't pinpoint a specific document.
- if (expectedResultDocs) {
- assert.eq(expectedResultDocs.length, resultDocs.length, resultDocs);
- expectedResultDocs.forEach(expectedDoc => {
- assert.docEq(
- expectedDoc,
- coll.findOne({_id: expectedDoc._id}),
- `Expected document (_id = ${expectedDoc._id}) not found in result collection: ${
- tojson(resultDocs)}`);
- });
- }
-}
-
-const doc1_a_nofields = {
- _id: 1,
- [timeFieldName]: dateTime,
- [metaFieldName]: "A",
-};
-const doc2_a_f101 = {
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: "A",
- f: 101
-};
-const doc3_a_f102 = {
- _id: 3,
- [timeFieldName]: dateTime,
- [metaFieldName]: "A",
- f: 102
-};
-const doc4_b_f103 = {
- _id: 4,
- [timeFieldName]: dateTime,
- [metaFieldName]: "B",
- f: 103
-};
-const doc5_b_f104 = {
- _id: 5,
- [timeFieldName]: dateTime,
- [metaFieldName]: "B",
- f: 104
-};
-const doc6_c_f105 = {
- _id: 6,
- [timeFieldName]: dateTime,
- [metaFieldName]: "C",
- f: 105
-};
-const doc7_c_f106 = {
- _id: 7,
- [timeFieldName]: dateTime,
- [metaFieldName]: "C",
- f: 106,
-};
+import {
+ doc1_a_nofields,
+ doc2_a_f101,
+ doc3_a_f102,
+ doc4_b_f103,
+ doc5_b_f104,
+ doc6_c_f105,
+ doc7_c_f106,
+ metaFieldName,
+ testDeleteOne
+} from "jstests/core/timeseries/libs/timeseries_writes_util.js";
// Query on the 'f' field leads to zero measurement delete.
(function testZeroMeasurementDelete() {
- jsTestLog("Running testZeroMeasurementDelete()");
testDeleteOne({
initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105],
filter: {f: 17},
@@ -105,7 +32,6 @@ const doc7_c_f106 = {
// Query on the 'f' field leads to a partial bucket delete.
(function testPartialBucketDelete() {
- jsTestLog("Running testPartialBucketDelete()");
testDeleteOne({
initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
filter: {f: 101},
@@ -116,7 +42,6 @@ const doc7_c_f106 = {
// Query on the 'f' field leads to a full (single document) bucket delete.
(function testFullBucketDelete() {
- jsTestLog("Running testFullBucketDelete()");
testDeleteOne({
initialDocList: [doc2_a_f101],
filter: {f: 101},
@@ -127,7 +52,6 @@ const doc7_c_f106 = {
// Query on the 'tag' field matches all docs and deletes one.
(function testMatchFullBucketOnlyDeletesOne() {
- jsTestLog("Running testMatchFullBucketOnlyDeletesOne()");
testDeleteOne({
initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
filter: {[metaFieldName]: "A"},
@@ -138,7 +62,6 @@ const doc7_c_f106 = {
// Query on the 'tag' and metric field.
(function testMetaAndMetricFilterOnlyDeletesOne() {
- jsTestLog("Running testMetaAndMetricFilterOnlyDeletesOne()");
testDeleteOne({
initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
filter: {[metaFieldName]: "A", f: {$gt: 100}},
@@ -149,7 +72,6 @@ const doc7_c_f106 = {
// Query on the 'f' field matches docs in multiple buckets but only deletes from one.
(function testMatchMultiBucketOnlyDeletesOne() {
- jsTestLog("Running testMatchMultiBucketOnlyDeletesOne()");
testDeleteOne({
initialDocList: [
doc1_a_nofields,
@@ -168,7 +90,6 @@ const doc7_c_f106 = {
// Empty filter matches all docs but only deletes one.
(function testEmptyFilterOnlyDeletesOne() {
- jsTestLog("Running testEmptyFilterOnlyDeletesOne()");
testDeleteOne({
initialDocList: [
doc1_a_nofields,
@@ -184,4 +105,3 @@ const doc7_c_f106 = {
nDeleted: 1,
});
})();
-})();
diff --git a/jstests/core/timeseries/timeseries_delete_with_meta.js b/jstests/core/timeseries/timeseries_delete_with_meta.js
index 4f3becabcceca..f376babcb1535 100644
--- a/jstests/core/timeseries/timeseries_delete_with_meta.js
+++ b/jstests/core/timeseries/timeseries_delete_with_meta.js
@@ -10,19 +10,10 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
-load("jstests/libs/analyze_plan.js"); // For planHasStage().
-load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
-
-if (FixtureHelpers.isMongos(db) &&
- TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo()) &&
- !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) {
- jsTestLog("Skipping test because the sharded time-series feature flag is disabled");
- return;
-}
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {planHasStage} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const testDB = db.getSiblingDB(jsTestName());
assert.commandWorked(testDB.dropDatabase());
@@ -181,4 +172,3 @@ TimeseriesTest.run((insert) => {
includeMetaField: false
});
});
-})();
diff --git a/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js b/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js
index f5d752d80a463..7f8ff3c95eeb5 100644
--- a/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js
+++ b/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js
@@ -20,10 +20,6 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
load("jstests/libs/curop_helpers.js");
load('jstests/libs/parallel_shell_helpers.js');
@@ -114,5 +110,4 @@ validateDeleteIndex([objA],
validateDeleteIndex([objA],
[{q: {[metaFieldName]: {a: "A"}}, limit: 0}],
ErrorCodes.NamespaceNotFound,
- testCases.REPLACE_COLLECTION);
-})();
+ testCases.REPLACE_COLLECTION);
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_explain_delete.js b/jstests/core/timeseries/timeseries_explain_delete.js
index bbee36a45ab37..1021ef77811cb 100644
--- a/jstests/core/timeseries/timeseries_explain_delete.js
+++ b/jstests/core/timeseries/timeseries_explain_delete.js
@@ -5,16 +5,14 @@
* # We need a timeseries collection.
* requires_timeseries,
* # To avoid multiversion tests
- * requires_fcv_70,
+ * requires_fcv_71,
* # To avoid burn-in tests in in-memory build variants
* requires_persistence,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStage() and getExecutionStages().
+import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const timeFieldName = "time";
const metaFieldName = "tag";
@@ -31,18 +29,21 @@ const docs = [
{_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 2},
{_id: 4, [timeFieldName]: dateTime, [metaFieldName]: 2},
];
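+// This predicate is implicitly ANDed into every expected bucket-level filter below.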
+const closedBucketFilter = {
+ "control.closed": {$not: {$eq: true}}
+};
function testDeleteExplain({
singleDeleteOp,
expectedDeleteStageName,
- expectedOpType,
+ expectedOpType = null,
expectedBucketFilter,
- expectedResidualFilter,
+ expectedResidualFilter = null,
expectedNumDeleted,
- expectedNumUnpacked,
+ expectedNumUnpacked = null,
expectedUsedIndexName = null
}) {
- assert(expectedDeleteStageName === "TS_MODIFY" || expectedDeleteStageName === "BATCHED_DELETE");
+ assert(expectedDeleteStageName === "TS_MODIFY" || expectedDeleteStageName === "DELETE");
// Prepares a timeseries collection.
const coll = testDB.getCollection(collNamePrefix + testCaseId++);
@@ -62,7 +63,6 @@ function testDeleteExplain({
const innerDeleteCommand = {delete: coll.getName(), deletes: [singleDeleteOp]};
const deleteExplainPlanCommand = {explain: innerDeleteCommand, verbosity: "queryPlanner"};
let explain = assert.commandWorked(testDB.runCommand(deleteExplainPlanCommand));
- jsTestLog(tojson(explain));
const deleteStage = getPlanStage(explain.queryPlanner.winningPlan, expectedDeleteStageName);
assert.neq(null,
deleteStage,
@@ -71,25 +71,22 @@ function testDeleteExplain({
assert.eq(expectedOpType,
deleteStage.opType,
`TS_MODIFY opType is wrong: ${tojson(deleteStage)}`);
-
- if (Object.keys(expectedBucketFilter).length) {
- expectedBucketFilter = {
- "$and": [expectedBucketFilter, {"control.closed": {$not: {$eq: true}}}]
- };
- } else {
- expectedBucketFilter = {"control.closed": {$not: {$eq: true}}};
- }
assert.eq(expectedBucketFilter,
deleteStage.bucketFilter,
`TS_MODIFY bucketFilter is wrong: ${tojson(deleteStage)}`);
assert.eq(expectedResidualFilter,
deleteStage.residualFilter,
`TS_MODIFY residualFilter is wrong: ${tojson(deleteStage)}`);
+ } else {
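+ // For the DELETE plan there is no TS_MODIFY stage, so the bucket-level filter is
+ // expected on the COLLSCAN stage instead.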
+ const collScanStage = getPlanStage(explain.queryPlanner.winningPlan, "COLLSCAN");
+ assert.neq(null, collScanStage, `COLLSCAN stage not found in the plan: ${tojson(explain)}`);
+ assert.eq(expectedBucketFilter,
+ collScanStage.filter,
+ `COLLSCAN filter is wrong: ${tojson(collScanStage)}`);
}
if (expectedUsedIndexName) {
const ixscanStage = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
- jsTestLog(tojson(ixscanStage));
assert.eq(expectedUsedIndexName,
ixscanStage.indexName,
`Wrong index used: ${tojson(ixscanStage)}`);
@@ -98,7 +95,6 @@ function testDeleteExplain({
// Verifies the TS_MODIFY stage in the execution stats.
const deleteExplainStatsCommand = {explain: innerDeleteCommand, verbosity: "executionStats"};
explain = assert.commandWorked(testDB.runCommand(deleteExplainStatsCommand));
- jsTestLog(tojson(explain));
const execStages = getExecutionStages(explain);
assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`);
assert.eq(expectedDeleteStageName,
@@ -127,12 +123,11 @@ function testDeleteExplain({
q: {},
limit: 0,
},
- // If the delete query is empty, we should use the BATCHED_DELETE plan.
- expectedDeleteStageName: "BATCHED_DELETE",
+ // If the delete query is empty, we should use the DELETE plan.
+ expectedDeleteStageName: "DELETE",
expectedOpType: "deleteMany",
- expectedBucketFilter: {},
- expectedResidualFilter: {},
- expectedNumDeleted: 2,
+ expectedBucketFilter: closedBucketFilter,
+ expectedNumDeleted: 4,
});
})();
@@ -146,94 +141,137 @@ function testDeleteExplain({
},
expectedDeleteStageName: "TS_MODIFY",
expectedOpType: "deleteMany",
- // The bucket filter is the one with metaFieldName translated to 'meta'.
- // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3"
- expectedBucketFilter: {meta: {$eq: 2}},
+ expectedBucketFilter: {
+ $and:
+ [closedBucketFilter, {meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 3}}]
+ },
expectedResidualFilter: {_id: {$gte: 3}},
expectedNumDeleted: 2,
expectedNumUnpacked: 1
});
})();
-(function testDeleteManyWithBucketFilterAndIndexHint() {
+(function testDeleteManyWithBucketMetricFilterOnly() {
testDeleteExplain({
singleDeleteOp: {
- // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
- // 'expectedNumUnpacked' is exactly 1.
+ // The metric-only filter leads to a COLLSCAN below the TS_MODIFY stage; the pushed-down
+ // '_id' bound matches both buckets and so 'expectedNumUnpacked' is 2.
- q: {[metaFieldName]: 2, _id: {$gte: 3}},
+ q: {_id: {$lte: 3}},
limit: 0,
- hint: {[metaFieldName]: 1}
},
expectedDeleteStageName: "TS_MODIFY",
expectedOpType: "deleteMany",
- // The bucket filter is the one with metaFieldName translated to 'meta'.
- // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3"
- expectedBucketFilter: {meta: {$eq: 2}},
- expectedResidualFilter: {_id: {$gte: 3}},
- expectedNumDeleted: 2,
- expectedNumUnpacked: 1,
- expectedUsedIndexName: metaFieldName + "_1"
- });
-})();
-
-// TODO SERVER-75518: Enable following three test cases.
-/*
-(function testDeleteOneWithEmptyBucketFilter() {
- testDeleteExplain({
- singleDeleteOp: {
- // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so
- // 'expectedNumUnpacked' is 2.
- q: {_id: 3},
- limit: 1,
- },
- expectedDeleteStageName: "TS_MODIFY",
- expectedOpType: "deleteOne",
- // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3"
- expectedBucketFilter: {},
- expectedResidualFilter: {_id: {$eq: 3}},
- expectedNumDeleted: 1,
+ expectedBucketFilter:
+ {$and: [closedBucketFilter, {"control.min._id": {$_internalExprLte: 3}}]},
+ expectedResidualFilter: {_id: {$lte: 3}},
+ expectedNumDeleted: 3,
expectedNumUnpacked: 2
});
})();
-(function testDeleteOneWithBucketFilter() {
- testDeleteExplain({
- singleDeleteOp: {
- // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
- // 'expectedNumUnpacked' is exactly 1.
- q: {[metaFieldName]: 2, _id: {$gte: 1}},
- limit: 1,
- },
- expectedDeleteStageName: "TS_MODIFY",
- expectedOpType: "deleteOne",
- // The bucket filter is the one with metaFieldName translated to 'meta'.
- // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 2"
- expectedBucketFilter: {meta: {$eq: 2}},
- expectedResidualFilter: {_id: {$gte: 1}},
- expectedNumDeleted: 1,
- expectedNumUnpacked: 1
- });
-})();
-
-(function testDeleteOneWithBucketFilterAndIndexHint() {
+(function testDeleteManyWithBucketFilterAndIndexHint() {
testDeleteExplain({
singleDeleteOp: {
// The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
// 'expectedNumUnpacked' is exactly 1.
- q: {[metaFieldName]: 2, _id: {$gte: 1}},
- limit: 1,
+ q: {[metaFieldName]: 2, _id: 3},
+ limit: 0,
hint: {[metaFieldName]: 1}
},
expectedDeleteStageName: "TS_MODIFY",
- expectedOpType: "deleteOne",
- // The bucket filter is the one with metaFieldName translated to 'meta'.
- // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3"
- expectedBucketFilter: {meta: {$eq: 2}},
- expectedResidualFilter: {_id: {$gte: 1}},
+ expectedOpType: "deleteMany",
+ expectedBucketFilter: {
+ $and: [
+ closedBucketFilter,
+ {meta: {$eq: 2}},
+ {
+ $and: [
+ {"control.min._id": {$_internalExprLte: 3}},
+ {"control.max._id": {$_internalExprGte: 3}}
+ ]
+ }
+ ]
+ },
+ expectedResidualFilter: {_id: {$eq: 3}},
expectedNumDeleted: 1,
expectedNumUnpacked: 1,
expectedUsedIndexName: metaFieldName + "_1"
});
})();
-*/
-})();
+
+if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) {
+ (function testDeleteOneWithEmptyBucketFilter() {
+ testDeleteExplain({
+ singleDeleteOp: {
+ // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage, but the
+ // pushed-down '_id' bounds match only one bucket and so 'expectedNumUnpacked' is 1.
+ q: {_id: 3},
+ limit: 1,
+ },
+ expectedDeleteStageName: "TS_MODIFY",
+ expectedOpType: "deleteOne",
+ expectedBucketFilter: {
+ $and: [
+ closedBucketFilter,
+ {
+ $and: [
+ {"control.min._id": {$_internalExprLte: 3}},
+ {"control.max._id": {$_internalExprGte: 3}}
+ ]
+ }
+ ]
+ },
+ expectedResidualFilter: {_id: {$eq: 3}},
+ expectedNumDeleted: 1,
+ expectedNumUnpacked: 1
+ });
+ })();
+
+ (function testDeleteOneWithBucketFilter() {
+ testDeleteExplain({
+ singleDeleteOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: {$gte: 1}},
+ limit: 1,
+ },
+ expectedDeleteStageName: "TS_MODIFY",
+ expectedOpType: "deleteOne",
+ expectedBucketFilter: {
+ $and: [
+ closedBucketFilter,
+ {meta: {$eq: 2}},
+ {"control.max._id": {$_internalExprGte: 1}}
+ ]
+ },
+ expectedResidualFilter: {_id: {$gte: 1}},
+ expectedNumDeleted: 1,
+ expectedNumUnpacked: 1
+ });
+ })();
+
+ (function testDeleteOneWithBucketFilterAndIndexHint() {
+ testDeleteExplain({
+ singleDeleteOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: {$gte: 1}},
+ limit: 1,
+ hint: {[metaFieldName]: 1}
+ },
+ expectedDeleteStageName: "TS_MODIFY",
+ expectedOpType: "deleteOne",
+ expectedBucketFilter: {
+ $and: [
+ closedBucketFilter,
+ {meta: {$eq: 2}},
+ {"control.max._id": {$_internalExprGte: 1}}
+ ]
+ },
+ expectedResidualFilter: {_id: {$gte: 1}},
+ expectedNumDeleted: 1,
+ expectedNumUnpacked: 1,
+ expectedUsedIndexName: metaFieldName + "_1"
+ });
+ })();
+}
diff --git a/jstests/core/timeseries/timeseries_explain_update.js b/jstests/core/timeseries/timeseries_explain_update.js
new file mode 100644
index 0000000000000..2ec666996c52c
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_explain_update.js
@@ -0,0 +1,293 @@
+/**
+ * Tests whether the explain works for a single update operation on a timeseries collection.
+ *
+ * @tags: [
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * # To avoid multiversion tests
+ * requires_fcv_71,
+ * # To avoid burn-in tests in in-memory build variants
+ * requires_persistence,
+ * # TODO SERVER-66393 Remove this tag.
+ * featureFlagTimeseriesUpdatesSupport,
+ * # TODO SERVER-73726 Remove this tag.
+ * assumes_unsharded_collection,
+ * ]
+ */
+
+import {
+ getCallerName,
+ getTestDB,
+ makeBucketFilter,
+ metaFieldName,
+ prepareCollection,
+ timeFieldName
+} from "jstests/core/timeseries/libs/timeseries_writes_util.js";
+import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+const dateTime = ISODate("2021-07-12T16:00:00Z");
+
+const testDB = getTestDB();
+
+const docs = [
+ {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: 1},
+ {_id: 2, [timeFieldName]: dateTime, [metaFieldName]: 1},
+ {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 2},
+ {_id: 4, [timeFieldName]: dateTime, [metaFieldName]: 2},
+];
+
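+// Runs the given update through explain at 'queryPlanner' and 'executionStats' verbosity and
+// verifies the winning plan's stage, filters, and execution counters without modifying any
+// documents in the collection.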
+function testUpdateExplain({
+ singleUpdateOp,
+ expectedUpdateStageName,
+ expectedOpType = null,
+ expectedBucketFilter,
+ expectedResidualFilter = null,
+ expectedNumUpdated,
+ expectedNumMatched = expectedNumUpdated,
+ expectedNumUpserted = 0,
+ expectedNumUnpacked = null,
+ expectedUsedIndexName = null
+}) {
+ assert(expectedUpdateStageName === "TS_MODIFY" || expectedUpdateStageName === "UPDATE");
+
+ // Prepares a timeseries collection.
+ const collName = getCallerName();
+ const coll = testDB.getCollection(collName);
+ prepareCollection({collName, initialDocList: docs});
+
+ // Creates an index same as the one in the hint so as to verify that the index hint is honored.
+ if (singleUpdateOp.hasOwnProperty("hint")) {
+ assert.commandWorked(coll.createIndex(singleUpdateOp.hint));
+ }
+
+ // Verifies the TS_MODIFY stage in the plan.
+ const innerUpdateCommand = {update: coll.getName(), updates: [singleUpdateOp]};
+ const updateExplainPlanCommand = {explain: innerUpdateCommand, verbosity: "queryPlanner"};
+ let explain = assert.commandWorked(testDB.runCommand(updateExplainPlanCommand));
+ const updateStage = getPlanStage(explain.queryPlanner.winningPlan, expectedUpdateStageName);
+ assert.neq(null,
+ updateStage,
+ `${expectedUpdateStageName} stage not found in the plan: ${tojson(explain)}`);
+ if (expectedUpdateStageName === "TS_MODIFY") {
+ assert.eq(expectedOpType,
+ updateStage.opType,
+ `TS_MODIFY opType is wrong: ${tojson(updateStage)}`);
+ assert.eq(expectedBucketFilter,
+ updateStage.bucketFilter,
+ `TS_MODIFY bucketFilter is wrong: ${tojson(updateStage)}`);
+ assert.eq(expectedResidualFilter,
+ updateStage.residualFilter,
+ `TS_MODIFY residualFilter is wrong: ${tojson(updateStage)}`);
+ } else {
+ const collScanStage = getPlanStage(explain.queryPlanner.winningPlan, "COLLSCAN");
+ assert.neq(null, collScanStage, `COLLSCAN stage not found in the plan: ${tojson(explain)}`);
+ assert.eq(expectedBucketFilter,
+ collScanStage.filter,
+ `COLLSCAN filter is wrong: ${tojson(collScanStage)}`);
+ }
+
+ if (expectedUsedIndexName) {
+ const ixscanStage = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN");
+ assert.eq(expectedUsedIndexName,
+ ixscanStage.indexName,
+ `Wrong index used: ${tojson(ixscanStage)}`);
+ }
+
+ // Verifies the TS_MODIFY stage in the execution stats.
+ const updateExplainStatsCommand = {explain: innerUpdateCommand, verbosity: "executionStats"};
+ explain = assert.commandWorked(testDB.runCommand(updateExplainStatsCommand));
+ const execStages = getExecutionStages(explain);
+ assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`);
+ assert.eq(expectedUpdateStageName,
+ execStages[0].stage,
+ `${expectedUpdateStageName} stage not found in executionStages: ${tojson(explain)}`);
+ if (expectedUpdateStageName === "TS_MODIFY") {
+ assert.eq(expectedNumUpdated,
+ execStages[0].nMeasurementsUpdated,
+ `Got wrong nMeasurementsUpdated: ${tojson(execStages[0])}`);
+ assert.eq(expectedNumMatched,
+ execStages[0].nMeasurementsMatched,
+ `Got wrong nMeasurementsMatched: ${tojson(execStages[0])}`);
+ assert.eq(expectedNumUpserted,
+ execStages[0].nMeasurementsUpserted,
+ `Got wrong nMeasurementsUpserted: ${tojson(execStages[0])}`);
+ assert.eq(expectedNumUnpacked,
+ execStages[0].nBucketsUnpacked,
+ `Got wrong nBucketsUnpacked: ${tojson(execStages[0])}`);
+ } else {
+ assert.eq(expectedNumUpdated,
+ execStages[0].nWouldModify,
+ `Got wrong nWouldModify: ${tojson(execStages[0])}`);
+ assert.eq(expectedNumMatched,
+ execStages[0].nMatched,
+ `Got wrong nMatched: ${tojson(execStages[0])}`);
+ }
+
+ assert.sameMembers(
+ docs, coll.find().toArray(), "Explain command must not touch documents in the collection");
+}
+
+(function testUpdateManyWithEmptyQuery() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ q: {},
+ u: {$set: {[metaFieldName]: 3}},
+ multi: true,
+ },
+ // If the update query is empty, we should use the UPDATE plan.
+ expectedUpdateStageName: "UPDATE",
+ expectedOpType: "updateMany",
+ expectedBucketFilter: makeBucketFilter(),
+ expectedNumUpdated: 4,
+ });
+})();
+
+(function testUpdateManyWithBucketFilter() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: {$gte: 3}},
+ u: {$set: {[metaFieldName]: 2}},
+ multi: true,
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateMany",
+ expectedBucketFilter:
+ makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 3}}),
+ expectedResidualFilter: {_id: {$gte: 3}},
+ expectedNumUpdated: 0,
+ expectedNumMatched: 2,
+ expectedNumUnpacked: 1
+ });
+})();
+
+(function testUpdateManyWithBucketFilterAndIndexHint() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: 3},
+ u: {$set: {[metaFieldName]: 3}},
+ multi: true,
+ hint: {[metaFieldName]: 1}
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateMany",
+ expectedBucketFilter: makeBucketFilter({meta: {$eq: 2}}, {
+ $and: [
+ {"control.min._id": {$_internalExprLte: 3}},
+ {"control.max._id": {$_internalExprGte: 3}}
+ ]
+ }),
+ expectedResidualFilter: {_id: {$eq: 3}},
+ expectedNumUpdated: 1,
+ expectedNumUnpacked: 1,
+ expectedUsedIndexName: metaFieldName + "_1"
+ });
+})();
+
+(function testUpsert() {
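+ // No bucket matches 'meta: 100', so nothing is unpacked and one measurement is upserted.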
+ testUpdateExplain({
+ singleUpdateOp: {
+ q: {[metaFieldName]: 100},
+ u: {$set: {[timeFieldName]: dateTime}},
+ multi: true,
+ upsert: true,
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateMany",
+ expectedBucketFilter: makeBucketFilter({meta: {$eq: 100}}),
+ expectedResidualFilter: {},
+ expectedNumUpdated: 0,
+ expectedNumMatched: 0,
+ expectedNumUnpacked: 0,
+ expectedNumUpserted: 1,
+ });
+})();
+
+(function testUpsertNoop() {
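+ // The filter matches existing measurements, so they are updated in place and nothing is
+ // upserted.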
+ testUpdateExplain({
+ singleUpdateOp: {
+ q: {[metaFieldName]: 1},
+ u: {$set: {f: 10}},
+ multi: true,
+ upsert: true,
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateMany",
+ expectedBucketFilter: makeBucketFilter({meta: {$eq: 1}}),
+ expectedResidualFilter: {},
+ expectedNumUpdated: 2,
+ expectedNumMatched: 2,
+ expectedNumUnpacked: 1,
+ expectedNumUpserted: 0,
+ });
+})();
+
+// TODO SERVER-73726 Reevaluate whether this exclusion is needed.
+if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) {
+ (function testUpdateOneWithEmptyBucketFilter() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage, but the
+ // pushed-down '_id' bounds match only one bucket and so 'expectedNumUnpacked' is 1.
+ q: {_id: 3},
+ u: {$set: {[metaFieldName]: 3}},
+ multi: false,
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateOne",
+ expectedBucketFilter: makeBucketFilter({
+ $and: [
+ {"control.min._id": {$_internalExprLte: 3}},
+ {"control.max._id": {$_internalExprGte: 3}}
+ ]
+ }),
+ expectedResidualFilter: {_id: {$eq: 3}},
+ expectedNumUpdated: 1,
+ expectedNumUnpacked: 1
+ });
+ })();
+
+ (function testUpdateOneWithBucketFilter() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: {$gte: 1}},
+ u: {$set: {[metaFieldName]: 3}},
+ multi: false,
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateOne",
+ expectedBucketFilter:
+ makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 1}}),
+ expectedResidualFilter: {_id: {$gte: 1}},
+ expectedNumUpdated: 1,
+ expectedNumUnpacked: 1
+ });
+ })();
+
+ (function testUpdateOneWithBucketFilterAndIndexHint() {
+ testUpdateExplain({
+ singleUpdateOp: {
+ // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so
+ // 'expectedNumUnpacked' is exactly 1.
+ q: {[metaFieldName]: 2, _id: {$gte: 1}},
+ u: {$set: {[metaFieldName]: 3}},
+ multi: false,
+ hint: {[metaFieldName]: 1}
+ },
+ expectedUpdateStageName: "TS_MODIFY",
+ expectedOpType: "updateOne",
+ expectedBucketFilter:
+ makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 1}}),
+ expectedResidualFilter: {_id: {$gte: 1}},
+ expectedNumUpdated: 1,
+ expectedNumUnpacked: 1,
+ expectedUsedIndexName: metaFieldName + "_1"
+ });
+ })();
+}
diff --git a/jstests/core/timeseries/timeseries_field_parsed_as_bson.js b/jstests/core/timeseries/timeseries_field_parsed_as_bson.js
new file mode 100644
index 0000000000000..6e4d582825bc0
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_field_parsed_as_bson.js
@@ -0,0 +1,68 @@
+/**
+ * Tests that the timeseries timeField is parsed as BSON.
+ *
+ * @tags: [
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * requires_fcv_71,
+ * ]
+ */
+
+(function() {
+'use strict';
+
+const collName = "timeseries_field_parsed_as_bson";
+const coll = db.getCollection(collName);
+
+coll.drop();
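+// Use a time field name full of quoting and brace characters; the expected validator below must
+// contain it verbatim, exercising the 'parsed as BSON' path.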
+const timeField = "badInput']}}}}}}";
+assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: timeField}}));
+
+const timeseriesCollInfo = db.getCollectionInfos({name: "system.buckets." + collName})[0];
+jsTestLog("Timeseries system collection info: " + tojson(timeseriesCollInfo));
+const properties = {};
+properties[timeField] = {
+ "bsonType": "date"
+};
+const expectedValidator = {
+ "$jsonSchema": {
+ "bsonType": "object",
+ "required": ["_id", "control", "data"],
+ "properties": {
+ "_id": {"bsonType": "objectId"},
+ "control": {
+ "bsonType": "object",
+ "required": ["version", "min", "max"],
+ "properties": {
+ "version": {"bsonType": "number"},
+ "min":
+ {"bsonType": "object", "required": [timeField], "properties": properties},
+ "max":
+ {"bsonType": "object", "required": [timeField], "properties": properties},
+ "closed": {"bsonType": "bool"},
+ "count": {"bsonType": "number", "minimum": 1}
+ },
+ "additionalProperties": false
+ },
+ "data": {"bsonType": "object"},
+ "meta": {}
+ },
+ "additionalProperties": false
+ }
+};
+
+assert(timeseriesCollInfo.options);
+assert.eq(timeseriesCollInfo.options.validator, expectedValidator);
+
+const doc = {
+ a: 1,
+ [timeField]: new Date("2021-01-01")
+};
+assert.commandWorked(coll.insert(doc));
+assert.docEq([doc], coll.aggregate([{$match: {}}, {$project: {_id: 0}}]).toArray());
+
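+// Time field names containing backslashes should also be accepted.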
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "\\"}}));
+coll.drop();
+assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "\\\\"}}));
+})();
diff --git a/jstests/core/timeseries/timeseries_find_and_modify_remove.js b/jstests/core/timeseries/timeseries_find_and_modify_remove.js
new file mode 100644
index 0000000000000..6d334b2214a2e
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_find_and_modify_remove.js
@@ -0,0 +1,223 @@
+/**
+ * Tests findAndModify with remove: true on a timeseries collection.
+ *
+ * @tags: [
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * # findAndModify with remove: true on a timeseries collection is supported since 7.1
+ * requires_fcv_71,
+ * # TODO SERVER-76583: Remove following two tags.
+ * does_not_support_retryable_writes,
+ * requires_non_retryable_writes,
+ * ]
+ */
+
+import {
+ doc1_a_nofields,
+ doc2_a_f101,
+ doc3_a_f102,
+ doc4_b_f103,
+ doc5_b_f104,
+ doc6_c_f105,
+ doc7_c_f106,
+ makeBucketFilter,
+ metaFieldName,
+ testFindOneAndRemove,
+ timeFieldName
+} from "jstests/core/timeseries/libs/timeseries_writes_util.js";
+
+// findAndModify with a sort option is not supported.
+(function testSortOptionFails() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105],
+ cmd: {filter: {f: {$gt: 100}}, sort: {f: 1}},
+ res: {errorCode: ErrorCodes.InvalidOptions},
+ });
+})();
+
+// Query on the 'f' field leads to zero measurement delete.
+(function testZeroMeasurementDelete() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105],
+ cmd: {filter: {f: 17}},
+ res: {
+ expectedDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105],
+ nDeleted: 0,
+ bucketFilter: makeBucketFilter({
+ $and: [
+ {"control.min.f": {$_internalExprLte: 17}},
+ {"control.max.f": {$_internalExprGte: 17}},
+ ]
+ }),
+ residualFilter: {f: {$eq: 17}},
+ nBucketsUnpacked: 0,
+ nReturned: 0,
+ },
+ });
+})();
+
+// Query on the 'f' field leads to a partial bucket delete.
+(function testPartialBucketDelete() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
+ cmd: {filter: {f: 101}},
+ res:
+ {expectedDocList: [doc1_a_nofields, doc3_a_f102], nDeleted: 1, deletedDoc: doc2_a_f101},
+ });
+})();
+
+// Query on the 'f' field leads to a partial bucket delete and 'fields' project the returned doc.
+(function testPartialBucketDeleteWithFields() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
+ cmd: {filter: {f: 102}, fields: {f: 1, [metaFieldName]: 1, _id: 0}},
+ res: {
+ expectedDocList: [doc1_a_nofields, doc2_a_f101],
+ nDeleted: 1,
+ deletedDoc: {f: 102, [metaFieldName]: "A"},
+ rootStage: "PROJECTION_DEFAULT",
+ bucketFilter: makeBucketFilter({
+ $and: [
+ {"control.min.f": {$_internalExprLte: 102}},
+ {"control.max.f": {$_internalExprGte: 102}},
+ ]
+ }),
+ residualFilter: {f: {$eq: 102}},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ },
+ });
+})();
+
+// Query on the 'f' field leads to a full (single document) bucket delete.
+(function testFullBucketDelete() {
+ testFindOneAndRemove({
+ initialDocList: [doc2_a_f101],
+ cmd: {filter: {f: 101}},
+ res: {
+ expectedDocList: [],
+ nDeleted: 1,
+ deletedDoc: doc2_a_f101,
+ bucketFilter: makeBucketFilter({
+ $and: [
+ {"control.min.f": {$_internalExprLte: 101}},
+ {"control.max.f": {$_internalExprGte: 101}},
+ ]
+ }),
+ residualFilter: {f: {$eq: 101}},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ },
+ });
+})();
+
+// Query on the 'tag' field matches all docs and deletes one.
+(function testMatchFullBucketOnlyDeletesOne() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
+ cmd: {filter: {[metaFieldName]: "A"}},
+ // Don't validate exact results as we could delete any doc.
+ res: {
+ nDeleted: 1,
+ bucketFilter: makeBucketFilter({meta: {$eq: "A"}}),
+ residualFilter: {},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ },
+ });
+})();
+
+// Query on the 'tag' and metric field.
+(function testMetaAndMetricFilterOnlyDeletesOne() {
+ testFindOneAndRemove({
+ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102],
+ cmd: {filter: {[metaFieldName]: "A", f: {$gt: 101}}},
+ res: {
+ nDeleted: 1,
+ deletedDoc: doc3_a_f102,
+ bucketFilter:
+ makeBucketFilter({meta: {$eq: "A"}}, {"control.max.f": {$_internalExprGt: 101}}),
+ residualFilter: {f: {$gt: 101}},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ }
+ });
+})();
+
+// Query on the 'f' field matches docs in multiple buckets but only deletes from one.
+(function testMatchMultiBucketOnlyDeletesOne() {
+ testFindOneAndRemove({
+ initialDocList: [
+ doc1_a_nofields,
+ doc2_a_f101,
+ doc3_a_f102,
+ doc4_b_f103,
+ doc5_b_f104,
+ doc6_c_f105,
+ doc7_c_f106
+ ],
+ cmd: {filter: {f: {$gt: 101}}},
+ // Don't validate exact results as we could delete one of a few docs.
+ res: {
+ nDeleted: 1,
+ bucketFilter: makeBucketFilter({"control.max.f": {$_internalExprGt: 101}}),
+ residualFilter: {f: {$gt: 101}},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ },
+ });
+})();
+
+// Empty filter matches all docs but only deletes one.
+(function testEmptyFilterOnlyDeletesOne() {
+ testFindOneAndRemove({
+ initialDocList: [
+ doc1_a_nofields,
+ doc2_a_f101,
+ doc3_a_f102,
+ doc4_b_f103,
+ doc5_b_f104,
+ doc6_c_f105,
+ doc7_c_f106
+ ],
+ cmd: {filter: {}},
+ // Don't validate exact results as we could delete any doc.
+ res: {
+ nDeleted: 1,
+ bucketFilter: makeBucketFilter({}),
+ residualFilter: {},
+ nBucketsUnpacked: 1,
+ nReturned: 1
+ },
+ });
+})();
+
+// Verifies that the collation is properly propagated to the bucket-level filter when the
+// query-level collation overrides the collection default collation.
+(function testFindAndRemoveWithCollation() {
+ testFindOneAndRemove({
+ initialDocList: [
+ doc1_a_nofields,
+ doc2_a_f101,
+ doc3_a_f102,
+ doc4_b_f103,
+ doc5_b_f104,
+ doc6_c_f105,
+ doc7_c_f106
+ ],
+ cmd: {
+ filter: {[metaFieldName]: "a", f: {$gt: 101}},
+ /*caseInsensitive collation*/
+ collation: {locale: "en", strength: 2}
+ },
+ res: {
+ nDeleted: 1,
+ deletedDoc: doc3_a_f102,
+ bucketFilter:
+ makeBucketFilter({meta: {$eq: "a"}}, {"control.max.f": {$_internalExprGt: 101}}),
+ residualFilter: {f: {$gt: 101}},
+ nBucketsUnpacked: 1,
+ nReturned: 1,
+ },
+ });
+})();
diff --git a/jstests/core/timeseries/timeseries_find_and_modify_update.js b/jstests/core/timeseries/timeseries_find_and_modify_update.js
new file mode 100644
index 0000000000000..c2c5bb007f9cb
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_find_and_modify_update.js
@@ -0,0 +1,538 @@
+/**
+ * Tests singleton updates on a time-series collection.
+ *
+ * @tags: [
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * featureFlagTimeseriesUpdatesSupport,
+ * # TODO SERVER-76454 Remove the following two tags.
+ * does_not_support_retryable_writes,
+ * requires_non_retryable_writes,
+ * ]
+ */
+
+import {
+ makeBucketFilter,
+ metaFieldName,
+ testFindOneAndUpdate,
+ timeFieldName
+} from "jstests/core/timeseries/libs/timeseries_writes_util.js";
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+/**
+ * Tests op-style updates.
+ */
+{
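+ // Naming convention: doc_m<meta>_<fields>, e.g. doc_m1_a_b has meta 1 and metric fields
+ // 'a' and 'b'.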
+ const doc_m1_a_b =
+ {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 1, _id: 1, a: 1, b: 1};
+ const doc_a_b = {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), _id: 1, a: 1, b: 1};
+ const doc_m1_b =
+ {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 1, _id: 1, b: 1};
+ const doc_m2_b =
+ {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 2, _id: 1, b: 1};
+ const doc_m1_arrayA_b = {
+ [timeFieldName]: ISODate("2023-02-06T19:19:01Z"),
+ [metaFieldName]: 1,
+ _id: 1,
+ a: ["arr", "ay"],
+ b: 1
+ };
+ const doc_stringM1_a_b = {
+ [timeFieldName]: ISODate("2023-02-06T19:19:01Z"),
+ [metaFieldName]: "1",
+ _id: 1,
+ a: 1,
+ b: 1
+ };
+ const doc_m1_c_d =
+ {[timeFieldName]: ISODate("2023-02-06T19:19:02Z"), [metaFieldName]: 1, _id: 2, c: 1, d: 1};
+ const doc_m1_a_b_later =
+ {[timeFieldName]: ISODate("2023-02-07T19:19:01Z"), [metaFieldName]: 1, _id: 1, a: 1, b: 1};
+ const query_m1_a1 = {a: {$eq: 1}, [metaFieldName]: {$eq: 1}};
+ const query_m1_b1 = {b: {$eq: 1}, [metaFieldName]: {$eq: 1}};
+
+ // Verifies that sort option is rejected.
+ (function testSortOptionFails() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: {},
+ update: {$unset: {a: ""}},
+ sort: {_id: 1},
+ },
+ res: {errorCode: ErrorCodes.InvalidOptions},
+ });
+ })();
+
+ // Metric field update: unset field and return the old doc.
+ (function testUnsetMetricField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$unset: {a: ""}},
+ },
+ res: {
+ resultDocList: [doc_m1_b, doc_m1_c_d],
+ returnDoc: doc_m1_a_b,
+ bucketFilter: makeBucketFilter({meta: {$eq: 1}}, {
+ $and: [
+ {"control.min.a": {$_internalExprLte: 1}},
+ {"control.max.a": {$_internalExprGte: 1}}
+ ]
+ }),
+ residualFilter: {a: {$eq: 1}},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 1,
+ },
+ });
+ })();
+
+ // Metric field update: add new field and return the new doc.
+ (function testAddNewMetricField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_b, doc_m1_c_d],
+ cmd: {filter: query_m1_b1, update: {$set: {a: 1}}, returnNew: true},
+ res: {
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ returnDoc: doc_m1_a_b,
+ bucketFilter: makeBucketFilter({meta: {$eq: 1}}, {
+ $and: [
+ {"control.min.b": {$_internalExprLte: 1}},
+ {"control.max.b": {$_internalExprGte: 1}}
+ ]
+ }),
+ residualFilter: {b: {$eq: 1}},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 1,
+ },
+ });
+ })();
+
+ // Metric field update: change field type (integer to array) with 'fields' option.
+ (function testChangeFieldTypeWithFields() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$set: {a: ["arr", "ay"]}},
+ fields: {a: 1, b: 1, _id: 0},
+ },
+ res: {resultDocList: [doc_m1_arrayA_b, doc_m1_c_d], returnDoc: {a: 1, b: 1}},
+ });
+ })();
+
+ // Metric field update: no-op with non-existent field to unset.
+ (function testMatchOneNoopUpdate() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$unset: {z: ""}},
+ },
+ res: {
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ returnDoc: doc_m1_a_b,
+ },
+ });
+ })();
+
+ // Metric field update: no-op with non-existent field to unset and returnNew.
+ (function testMatchOneNoopUpdateWithReturnNew() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$unset: {z: ""}},
+ returnNew: true,
+ },
+ res: {
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ // The return doc is the same as the original doc, since the update is a no-op.
+ returnDoc: doc_m1_a_b,
+ },
+ });
+ })();
+
+ // Metric field update: the empty filter matches multiple docs, but unsetting a
+ // non-existent field is a no-op.
+ (function testMatchMultipleNoopUpdate() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: {},
+ update: {$unset: {z: ""}},
+ },
+ res: {
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ returnDoc: doc_m1_a_b,
+ bucketFilter: makeBucketFilter({}),
+ residualFilter: {},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 0,
+ nUpserted: 0,
+ },
+ });
+ })();
+
+ // Metric field update: match multiple docs, only update one, returning the new doc.
+ (function testMatchMultipleUpdateOne() {
+ const resultDoc = Object.assign({}, doc_a_b, {a: 100});
+ testFindOneAndUpdate({
+ initialDocList: [doc_a_b, doc_m1_a_b_later],
+ cmd: {
+ filter: {},
+ update: {$set: {a: 100}},
+ returnNew: true,
+ },
+ res: {
+ resultDocList: [resultDoc, doc_m1_a_b_later],
+ returnDoc: resultDoc,
+ bucketFilter: makeBucketFilter({}),
+ residualFilter: {},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 1,
+ },
+ });
+ })();
+
+ // Match and update zero docs.
+ (function testMatchNone() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: {[metaFieldName]: {z: "Z"}},
+ update: {$set: {a: 100}},
+ },
+ res: {
+ resultDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d],
+ bucketFilter: makeBucketFilter({meta: {$eq: {z: "Z"}}}),
+ residualFilter: {},
+ nBucketsUnpacked: 0,
+ nMatched: 0,
+ nModified: 0,
+ nUpserted: 0,
+ },
+ });
+ })();
+
+ // Meta-only update only updates one.
+ (function testMetaOnlyUpdateOne() {
+ const returnDoc = Object.assign({}, doc_m1_a_b, {[metaFieldName]: 2});
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: {[metaFieldName]: 1},
+ update: {$set: {[metaFieldName]: 2}},
+ returnNew: true,
+ },
+ res: {
+ resultDocList: [doc_m1_c_d, returnDoc],
+ returnDoc: returnDoc,
+ bucketFilter: makeBucketFilter({meta: {$eq: 1}}),
+ residualFilter: {},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 1,
+ nUpserted: 0,
+ },
+ });
+ })();
+
+ // Meta field update: remove meta field.
+ (function testRemoveMetaField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$unset: {[metaFieldName]: ""}},
+ },
+ res: {resultDocList: [doc_a_b, doc_m1_c_d], returnDoc: doc_m1_a_b},
+ });
+ })();
+
+ // Meta field update: add meta field.
+ (function testAddMetaField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_a_b],
+ cmd: {
+ filter: {},
+ update: {$set: {[metaFieldName]: 1}},
+ },
+ res: {resultDocList: [doc_m1_a_b], returnDoc: doc_a_b},
+ });
+ })();
+
+ // Meta field update: update meta field.
+ (function testUpdateMetaField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_b],
+ cmd: {
+ filter: {},
+ update: {$set: {[metaFieldName]: 2}},
+ },
+ res: {resultDocList: [doc_m2_b], returnDoc: doc_m1_b},
+ });
+ })();
+
+ // Meta field update: update meta field to different type (integer to string).
+ (function testChangeMetaFieldType() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ cmd: {
+ filter: query_m1_a1,
+ update: {$set: {[metaFieldName]: "1"}},
+ },
+ res: {resultDocList: [doc_stringM1_a_b, doc_m1_c_d], returnDoc: doc_m1_a_b},
+ });
+ })();
+}
+
+/**
+ * Tests pipeline-style updates.
+ */
+{
+ const timestamp2023 = ISODate("2023-02-06T19:19:00Z");
+ const doc_2023_m1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1};
+ const doc_2023_m2_a1_newField =
+ {[timeFieldName]: timestamp2023, [metaFieldName]: 2, _id: 1, a: 1, "newField": 42};
+
+ // Update metaField and add a new field.
+ (function testPipelineUpdateSetMultipleFields() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_2023_m1_a1],
+ cmd: {
+ filter: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}},
+ update: [
+ {$set: {[metaFieldName]: 2}},
+ {$set: {"newField": 42}},
+ ],
+ },
+ res: {
+ resultDocList: [doc_2023_m2_a1_newField],
+ returnDoc: doc_2023_m1_a1,
+ bucketFilter: makeBucketFilter({meta: {$eq: 1}}, {
+ $and: [
+ {"control.min.a": {$_internalExprLte: 1}},
+ {"control.max.a": {$_internalExprGte: 1}},
+ ]
+ }),
+ residualFilter: {a: {$eq: 1}},
+ nBucketsUnpacked: 1,
+ nMatched: 1,
+ nModified: 1,
+ nUpserted: 0,
+ },
+ });
+ })();
+
+ // Expect removal of the timeField to fail.
+ (function testPipelineRemoveTimeField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_2023_m1_a1],
+ cmd: {
+ filter: {},
+ update: [{$set: {[metaFieldName]: 2}}, {$unset: timeFieldName}],
+ },
+ res: {
+ errorCode: ErrorCodes.BadValue,
+ resultDocList: [doc_2023_m1_a1],
+ },
+ });
+ })();
+
+ // Expect changing the type of the timeField to fail.
+ (function testPipelineChangeTimeFieldType() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_2023_m1_a1],
+ cmd: {
+ filter: {},
+ update: [{$set: {[timeFieldName]: "string"}}],
+ },
+ res: {
+ errorCode: ErrorCodes.BadValue,
+ resultDocList: [doc_2023_m1_a1],
+ },
+ });
+ })();
+}
+
+/**
+ * Tests full measurement replacement.
+ */
+{
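+ // Replacement-style updates must carry a valid time field; the cases below cover both the
+ // success and the BadValue error paths.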
+ const timestamp2023 = ISODate("2023-02-06T19:19:00Z");
+ const doc_t2023_m1_id1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1};
+ const doc_t2023_m2_id2_a2 = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, _id: 2, a: 2};
+ const doc_t2023_m2_noId_a2 = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, a: 2};
+
+ // Full measurement replacement: update every field in the document, including the _id.
+ (function testReplacementUpdateChangeId() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ cmd: {
+ filter: {},
+ update: doc_t2023_m2_id2_a2,
+ },
+ res: {resultDocList: [doc_t2023_m2_id2_a2], returnDoc: doc_t2023_m1_id1_a1},
+ });
+ })();
+
+ // Full measurement replacement: update every field in the document, except the _id.
+ (function testReplacementUpdateNoId() {
+ const returnDoc = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, a: 2, _id: 1};
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ cmd: {
+ filter: {_id: 1},
+ update: doc_t2023_m2_noId_a2,
+ returnNew: true,
+ },
+ res: {
+ resultDocList: [
+ doc_t2023_m2_id2_a2,
+ returnDoc,
+ ],
+ returnDoc: returnDoc,
+ },
+ });
+ })();
+
+ // Replacement with no time field.
+ (function testReplacementUpdateNoTimeField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ cmd: {
+ filter: {_id: 1},
+ update: {[metaFieldName]: 1, a: 1, _id: 10},
+ },
+ res: {
+ errorCode: ErrorCodes.BadValue,
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ }
+ });
+ })();
+
+ // Replacement with time field of the wrong type.
+ (function testReplacementUpdateWrongTypeTimeField() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ cmd: {
+ filter: {_id: 1},
+ update: {[metaFieldName]: 1, a: 1, _id: 10, [timeFieldName]: "string"},
+ },
+ res: {
+ errorCode: ErrorCodes.BadValue,
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ },
+ });
+ })();
+
+ // Replacement that results in two duplicate measurements.
+ (function testReplacementUpdateDuplicateIds() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ cmd: {
+ filter: {_id: 1},
+ update: doc_t2023_m2_id2_a2,
+ },
+ res: {
+ resultDocList: [doc_t2023_m2_id2_a2, doc_t2023_m2_id2_a2],
+ returnDoc: doc_t2023_m1_id1_a1,
+ },
+ });
+ })();
+
+ // Replacement that only references the meta field. Still fails because of the missing time
+ // field.
+ (function testReplacementMetaOnly() {
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ cmd: {
+ filter: {[metaFieldName]: 1},
+ update: {[metaFieldName]: 3},
+ },
+ res: {
+ errorCode: ErrorCodes.BadValue,
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ },
+ });
+ })();
+
+ // Tests upsert with full measurement & returnNew = false.
+ (function testUpsert() {
+ if (FixtureHelpers.isMongos(db)) {
+ jsTestLog("Skipping findAndModify upsert test on sharded cluster.");
+ return;
+ }
+
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ cmd: {
+ filter: {[metaFieldName]: {$eq: 2}},
+ update: doc_t2023_m2_id2_a2,
+ // returnNew defaults to false.
+ upsert: true,
+ },
+ res: {
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ returnDoc: null,
+ bucketFilter: makeBucketFilter({meta: {$eq: 2}}),
+ residualFilter: {},
+ nBucketsUnpacked: 0,
+ nMatched: 0,
+ nModified: 0,
+ nUpserted: 1,
+ },
+ });
+ })();
+
+ // Tests upsert with full measurement & returnNew = true.
+ (function testUpsertWithReturnNew() {
+ if (FixtureHelpers.isMongos(db)) {
+ jsTestLog("Skipping findAndModify upsert test on sharded cluster.");
+ return;
+ }
+
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ cmd: {
+ filter: {[metaFieldName]: {$eq: 2}},
+ update: doc_t2023_m2_id2_a2,
+ returnNew: true,
+ upsert: true,
+ },
+ res: {
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2],
+ returnDoc: doc_t2023_m2_id2_a2,
+ bucketFilter: makeBucketFilter({meta: {$eq: 2}}),
+ residualFilter: {},
+ nBucketsUnpacked: 0,
+ nMatched: 0,
+ nModified: 0,
+ nUpserted: 1,
+ },
+ });
+ })();
+
+ // Tests that no upsert is performed when the filter matches an existing measurement, even
+ // though the update itself is a no-op.
+ (function testNoopUpsert() {
+ if (FixtureHelpers.isMongos(db)) {
+ jsTestLog("Skipping findAndModify upsert test on sharded cluster.");
+ return;
+ }
+
+ testFindOneAndUpdate({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ cmd: {filter: {}, update: {$unset: {z: ""}}, upsert: true},
+ res: {
+ resultDocList: [doc_t2023_m1_id1_a1],
+ returnDoc: doc_t2023_m1_id1_a1,
+ nUpserted: 0,
+ },
+ });
+ })();
+}
diff --git a/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js b/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js
index b68ce61349956..1e01661f40a66 100644
--- a/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js
+++ b/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js
@@ -9,16 +9,13 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/core/timeseries/libs/geo.js");
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
Random.setRandomSeed(7813223789272959000);
@@ -178,5 +175,4 @@ for (const minOrMax of ['maxDistance', 'minDistance']) {
// Make sure the time-series results match.
const tsResult = tsColl.aggregate(pipeline).toArray();
assert.sameMembers(result, tsResult);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_geonear_measurements.js b/jstests/core/timeseries/timeseries_geonear_measurements.js
index a79b7ed98095d..fa8683b60757b 100644
--- a/jstests/core/timeseries/timeseries_geonear_measurements.js
+++ b/jstests/core/timeseries/timeseries_geonear_measurements.js
@@ -22,16 +22,13 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {aggPlanHasStage, getAggPlanStage} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
Random.setRandomSeed();
@@ -655,4 +652,3 @@ function runExamples(coll, isTimeseries, has2dsphereIndex) {
insertTestData(coll);
runExamples(coll, true /* isTimeseries */, true /* has2dsphereIndex */);
}
-})();
diff --git a/jstests/core/timeseries/timeseries_geonear_random_measurements.js b/jstests/core/timeseries/timeseries_geonear_random_measurements.js
index 3f1d59f4c6cca..a38d577fffef9 100644
--- a/jstests/core/timeseries/timeseries_geonear_random_measurements.js
+++ b/jstests/core/timeseries/timeseries_geonear_random_measurements.js
@@ -9,16 +9,13 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/core/timeseries/libs/geo.js");
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
Random.setRandomSeed();
@@ -116,5 +113,4 @@ for (const doc of docs) {
assertSortedAscending(result.map(d => d.dist));
assertSortedAscending(tsResult.map(d => d.dist));
print('Got ' + result.length + ' results');
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_graph_lookup.js b/jstests/core/timeseries/timeseries_graph_lookup.js
index 6b71e56ea9bea..bf43190ae9c29 100644
--- a/jstests/core/timeseries/timeseries_graph_lookup.js
+++ b/jstests/core/timeseries/timeseries_graph_lookup.js
@@ -4,12 +4,10 @@
* @tags: [
* # We need a timeseries collection.
* requires_timeseries,
+ * references_foreign_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
@@ -148,5 +146,4 @@ TimeseriesTest.run((insert) => {
collAOption = timeseriesCollOption;
collBOption = timeseriesCollOption;
testFunc(collAOption, collBOption);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_groupby_reorder.js b/jstests/core/timeseries/timeseries_groupby_reorder.js
index c4c34bc12492e..1948810a9d483 100644
--- a/jstests/core/timeseries/timeseries_groupby_reorder.js
+++ b/jstests/core/timeseries/timeseries_groupby_reorder.js
@@ -8,11 +8,7 @@
* requires_fcv_61,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js");
-load("jstests/core/timeseries/libs/timeseries.js");
const coll = db.timeseries_groupby_reorder;
coll.drop();
@@ -26,7 +22,7 @@ assert.commandWorked(coll.insert({_id: 0, t: t, b: 2, c: 2}));
assert.commandWorked(coll.insert({_id: 0, t: t, b: 3, c: 3}));
// Test reordering the groupby and internal unpack buckets.
-if (!isMongos(db)) {
+if (!FixtureHelpers.isMongos(db)) {
const res = coll.explain("queryPlanner").aggregate([
{$group: {_id: '$meta', accmin: {$min: '$b'}, accmax: {$max: '$c'}}}
]);
@@ -51,5 +47,4 @@ res = coll.aggregate([{
}
}])
.toArray();
-assert.docEq([{"_id": null, "accmin": 2, "accmax": 6}], res);
-})();
+assert.docEq([{"_id": null, "accmin": 2, "accmax": 6}], res);
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_hint.js b/jstests/core/timeseries/timeseries_hint.js
index 72a3d698a54d9..3c23a0407af35 100644
--- a/jstests/core/timeseries/timeseries_hint.js
+++ b/jstests/core/timeseries/timeseries_hint.js
@@ -13,11 +13,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_hint;
coll.drop();
@@ -88,5 +84,4 @@ runTest({
},
expectedResult: docsDesc,
expectedDirection: 'backward',
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_id_range.js b/jstests/core/timeseries/timeseries_id_range.js
index 6b0048d52df9e..9917a492108be 100644
--- a/jstests/core/timeseries/timeseries_id_range.js
+++ b/jstests/core/timeseries/timeseries_id_range.js
@@ -15,11 +15,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js";
TimeseriesTest.run((insert) => {
// These dates will all be inserted into individual buckets.
@@ -239,5 +236,4 @@ TimeseriesTest.run((insert) => {
expl = coll.explain("executionStats").aggregate(pipeline);
assert.eq(3, expl.stages[0].$cursor.executionStats.totalDocsExamined);
})();
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_index.js b/jstests/core/timeseries/timeseries_index.js
index 4a2a8b3f027a3..81da8341746c6 100644
--- a/jstests/core/timeseries/timeseries_index.js
+++ b/jstests/core/timeseries/timeseries_index.js
@@ -9,11 +9,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/fixture_helpers.js");
TimeseriesTest.run((insert) => {
@@ -309,4 +306,3 @@ TimeseriesTest.run((insert) => {
testCreateIndexFailed({$natural: -1});
testCreateIndexFailed({$hint: 'my_index_name'});
});
-})();
diff --git a/jstests/core/timeseries/timeseries_index_build_failure.js b/jstests/core/timeseries/timeseries_index_build_failure.js
index 8b6775b597be2..1a5929e42ef5e 100644
--- a/jstests/core/timeseries/timeseries_index_build_failure.js
+++ b/jstests/core/timeseries/timeseries_index_build_failure.js
@@ -10,10 +10,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const coll = db.timeseries_index_skipped_record_tracker;
@@ -33,5 +30,4 @@ TimeseriesTest.run((insert) => {
const bucketColl = db.getCollection("system.buckets." + coll.getName());
assert.commandFailedWithCode(bucketColl.createIndex({"control.min.time": "2dsphere"}), 16755);
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_index_collation.js b/jstests/core/timeseries/timeseries_index_collation.js
index ae2a126889ae6..9791b5996b629 100644
--- a/jstests/core/timeseries/timeseries_index_collation.js
+++ b/jstests/core/timeseries/timeseries_index_collation.js
@@ -9,10 +9,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const timeFieldName = 'tm';
@@ -99,5 +96,4 @@ TimeseriesTest.run((insert) => {
assert.eq(false,
indexSpecsString[0].collation.numericOrdering,
'Invalid index spec for index_string: ' + tojson(indexSpecsString[0]));
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_index_partial.js b/jstests/core/timeseries/timeseries_index_partial.js
index dd2affb71ea84..001472eb38bbe 100644
--- a/jstests/core/timeseries/timeseries_index_partial.js
+++ b/jstests/core/timeseries/timeseries_index_partial.js
@@ -2,8 +2,6 @@
* Test creating and using partial indexes, on a time-series collection.
*
* @tags: [
- * # TODO (SERVER-73316): remove
- * assumes_against_mongod_not_mongos,
* # Explain of a resolved view must be executed by mongos.
* directly_against_shardsvrs_incompatible,
* # Refusing to run a test that issues an aggregation command with explain because it may return
@@ -13,46 +11,58 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {
+ getAggPlanStage,
+ getPlanStages,
+ getRejectedPlan,
+ getRejectedPlans
+} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
const coll = db.timeseries_index_partial;
const timeField = 'time';
const metaField = 'm';
-
-coll.drop();
-assert.commandWorked(db.createCollection(coll.getName(), {timeseries: {timeField, metaField}}));
-
-const buckets = db.getCollection('system.buckets.' + coll.getName());
let extraIndexes = [];
let extraBucketIndexes = [];
-if (FixtureHelpers.isSharded(buckets)) {
- // If the collection is sharded, expect an implicitly-created index on time.
- // It will appear differently in listIndexes depending on whether you look at the time-series
- // collection or the buckets collection.
- extraIndexes.push({
- "v": 2,
- "key": {"time": 1},
- "name": "control.min.time_1",
- });
- extraBucketIndexes.push({
- "v": 2,
- "key": {"control.min.time": 1},
- "name": "control.min.time_1",
- });
-}
+let buckets = [];
-// TODO SERVER-66438: Remove feature flag check.
-if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesScalabilityImprovements")) {
+function resetCollection(collation) {
+ coll.drop();
+ extraIndexes = [];
+ extraBucketIndexes = [];
+
+ if (collation) {
+ assert.commandWorked(db.createCollection(coll.getName(), {
+ timeseries: {timeField, metaField},
+ collation: collation,
+ }));
+ } else {
+ assert.commandWorked(
+ db.createCollection(coll.getName(), {timeseries: {timeField, metaField}}));
+ }
+ buckets = db.getCollection('system.buckets.' + coll.getName());
+ // If the collection is sharded, expect an implicitly-created index on time. It will appear
+ // differently in listIndexes depending on whether you look at the time-series collection or
+ // the buckets collection.
+ // TODO SERVER-77112 fix this logic once this issue is fixed.
+ if (FixtureHelpers.isSharded(buckets)) {
+ extraIndexes.push({
+ "v": 2,
+ "key": {"time": 1},
+ "name": "control.min.time_1",
+ });
+ extraBucketIndexes.push({
+ "v": 2,
+ "key": {"control.min.time": 1},
+ "name": "control.min.time_1",
+ });
+ }
// When enabled, the {meta: 1, time: 1} index gets built by default on the time-series
// bucket collection.
extraIndexes.push({
@@ -67,6 +77,8 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesScalabilityImprovements")
});
}
+resetCollection();
+
assert.sameMembers(coll.getIndexes(), extraIndexes);
assert.sameMembers(buckets.getIndexes(), extraBucketIndexes);
@@ -109,10 +121,16 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
// If scan is not present, check rejected plans
if (scan === null) {
const rejectedPlans = getRejectedPlans(getAggPlanStage(explain, "$cursor")["$cursor"]);
- if (rejectedPlans.length === 1) {
- const scans = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN");
- if (scans.length === 1) {
- scan = scans[0];
+ if (rejectedPlans.length === 2) {
+ let firstScan = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN");
+ let secondScan = getPlanStages(getRejectedPlan(rejectedPlans[1]), "IXSCAN");
+ // Both plans should have an "IXSCAN" stage and one stage should scan the index on
+ // the 'a' field.
+ if (firstScan.length === 1 && secondScan.length === 1) {
+ scan = firstScan[0];
+ if (secondScan[0]["indexName"] == "a_1") {
+ scan = secondScan[0];
+ }
}
}
} else {
@@ -129,7 +147,7 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
const result = coll.aggregate([{$match: predicate}], {hint: {a: 1}}).toArray();
const unindexed =
coll.aggregate([{$_internalInhibitOptimization: {}}, {$match: predicate}]).toArray();
- assert.docEq(result, unindexed);
+ assert.sameMembers(result, unindexed);
}
function checkPlanAndResults(predicate) {
checkPlan(predicate);
@@ -165,6 +183,12 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
// Test some predicates on the time field.
{
+ // TODO SERVER-77112 we can change this to assert.commandWorkedOrFailed, since the indexes
+ // made by 'createIndex' should be identical to the implicit index made by
+ // 'shardCollection'.
+ if (!FixtureHelpers.isSharded(buckets)) {
+ assert.commandWorked(coll.createIndex({[timeField]: 1}));
+ }
const t0 = ISODate('2000-01-01T00:00:00Z');
const t1 = ISODate('2000-01-01T00:00:01Z');
const t2 = ISODate('2000-01-01T00:00:02Z');
@@ -192,6 +216,11 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression:
coll.createIndex({a: 1}, {partialFilterExpression: {[timeField]: {$gte: t1}}}));
check({a: {$lt: 999}, [timeField]: {$gte: t1}});
check({a: {$lt: 999}, [timeField]: {$gte: t2}});
+
+ // Drop the index, so it doesn't interfere with other tests.
+ if (!FixtureHelpers.isSharded(buckets)) {
+ assert.commandWorked(coll.dropIndex({[timeField]: 1}));
+ }
}
assert.commandWorked(coll.dropIndex({a: 1}));
@@ -253,12 +282,9 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([
// Test how partialFilterExpression interacts with collation.
{
+ // Recreate the collection with a collation.
const numericCollation = {locale: "en_US", numericOrdering: true};
- coll.drop();
- assert.commandWorked(db.createCollection(coll.getName(), {
- timeseries: {timeField, metaField},
- collation: numericCollation,
- }));
+ resetCollection(numericCollation);
assert.commandWorked(coll.insert([
{[timeField]: ISODate(), [metaField]: {x: "1000", y: 1}, a: "120"},
@@ -601,4 +627,3 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([
}
});
}
-})();
diff --git a/jstests/core/timeseries/timeseries_index_spec.js b/jstests/core/timeseries/timeseries_index_spec.js
index d2ac8e58687de..8ca6b5f06b104 100644
--- a/jstests/core/timeseries/timeseries_index_spec.js
+++ b/jstests/core/timeseries/timeseries_index_spec.js
@@ -9,11 +9,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
TimeseriesTest.run(() => {
const collName = "timeseries_index_spec";
@@ -137,4 +134,3 @@ TimeseriesTest.run(() => {
assert(foundIndex);
}
});
-}());
diff --git a/jstests/core/timeseries/timeseries_index_stats.js b/jstests/core/timeseries/timeseries_index_stats.js
index 5ac1b4c10d4e8..d191ec24fa020 100644
--- a/jstests/core/timeseries/timeseries_index_stats.js
+++ b/jstests/core/timeseries/timeseries_index_stats.js
@@ -13,10 +13,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/libs/fixture_helpers.js"); // For isSharded.
TimeseriesTest.run((insert) => {
@@ -97,4 +94,3 @@ TimeseriesTest.run((insert) => {
assert.sameMembers(
Object.keys(indexKeys), multiStageDocs[0].index_names, tojson(multiStageDocs));
});
-})();
diff --git a/jstests/core/timeseries/timeseries_index_ttl_partial.js b/jstests/core/timeseries/timeseries_index_ttl_partial.js
index e0f64b6b8422f..03e3114e4400f 100644
--- a/jstests/core/timeseries/timeseries_index_ttl_partial.js
+++ b/jstests/core/timeseries/timeseries_index_ttl_partial.js
@@ -9,15 +9,12 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- return;
+ quit();
}
const collName = "timeseries_index_ttl_partial";
@@ -148,5 +145,4 @@ const resetTsColl = function(extraOptions = {}) {
assert.commandFailedWithCode(coll.createIndex(timeAndDataSpec, filterOnMeta),
ErrorCodes.CannotCreateIndex);
}
-}());
-})();
+}());
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_index_use.js b/jstests/core/timeseries/timeseries_index_use.js
index ebcea9d8e6b41..82912c018b27c 100644
--- a/jstests/core/timeseries/timeseries_index_use.js
+++ b/jstests/core/timeseries/timeseries_index_use.js
@@ -13,11 +13,13 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {
+ getAggPlanStage,
+ getPlanStages,
+ getRejectedPlan,
+ getRejectedPlans
+} from "jstests/libs/analyze_plan.js";
const generateTest = (useHint) => {
return (insert) => {
@@ -403,8 +405,7 @@ const generateTest = (useHint) => {
{},
collation);
- /*********************************** Tests $expr predicates
- * *********************************/
+ /*********************************** Tests $expr predicates *******************************/
resetCollections();
assert.commandWorked(insert(coll, [
{_id: 0, [timeFieldName]: ISODate('1990-01-01 00:00:00.000Z'), [metaFieldName]: 2},
@@ -433,5 +434,4 @@ const generateTest = (useHint) => {
// Run the test twice, once without hinting the index, and again hinting the index by spec.
TimeseriesTest.run(generateTest(false));
-TimeseriesTest.run(generateTest(true));
-})();
+TimeseriesTest.run(generateTest(true));
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_insert.js b/jstests/core/timeseries/timeseries_insert.js
index bbb876a3adeaa..965a38151ba71 100644
--- a/jstests/core/timeseries/timeseries_insert.js
+++ b/jstests/core/timeseries/timeseries_insert.js
@@ -5,10 +5,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const coll = db.timeseries_insert;
coll.drop();
@@ -31,5 +28,4 @@ for (let i = 0; i < 100; i++) {
fields: host.fields,
tags: host.tags,
}));
-}
-})();
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_insert_after_delete.js b/jstests/core/timeseries/timeseries_insert_after_delete.js
index cffe0c7c91d16..5c8ec7482f882 100644
--- a/jstests/core/timeseries/timeseries_insert_after_delete.js
+++ b/jstests/core/timeseries/timeseries_insert_after_delete.js
@@ -8,18 +8,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
-load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
-
-if (FixtureHelpers.isMongos(db) &&
- !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) {
- jsTestLog(
- "Skipping test because the sharded time-series updates and deletes feature flag is disabled");
- return;
-}
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
@@ -43,4 +33,3 @@ TimeseriesTest.run((insert) => {
assert.docEq([objB], docs);
assert(coll.drop());
});
-})();
diff --git a/jstests/core/timeseries/timeseries_insert_after_update.js b/jstests/core/timeseries/timeseries_insert_after_update.js
index 196ccae2149a0..eef90c2c63ea6 100644
--- a/jstests/core/timeseries/timeseries_insert_after_update.js
+++ b/jstests/core/timeseries/timeseries_insert_after_update.js
@@ -14,25 +14,9 @@
* assumes_read_preference_unchanged,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/libs/fixture_helpers.js");
-if (FixtureHelpers.isMongos(db) &&
- !TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) {
- jsTestLog("Skipping test because the time-series updates and deletes feature flag is disabled");
- return;
-}
-
-if (FixtureHelpers.isMongos(db) &&
- !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) {
- jsTestLog(
- "Skipping test because the sharded time-series updates and deletes feature flag is disabled");
- return;
-}
-
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
assert.commandWorked(testDB.dropDatabase());
@@ -76,4 +60,3 @@ TimeseriesTest.run((insert) => {
assert.eq(bucketsColl.find().itcount(), 3, bucketsColl.find().toArray());
}
});
-})();
diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort.js b/jstests/core/timeseries/timeseries_internal_bounded_sort.js
index 873c16b7421cb..d3cb5937914b2 100644
--- a/jstests/core/timeseries/timeseries_internal_bounded_sort.js
+++ b/jstests/core/timeseries/timeseries_internal_bounded_sort.js
@@ -10,16 +10,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_internal_bounded_sort;
const buckets = db['system.buckets.' + coll.getName()];
@@ -177,4 +169,3 @@ function runTest(ascending) {
runTest(true); // ascending
runTest(false); // descending
-})();
diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js
index 92d77cf8fd609..1c01d4f353976 100644
--- a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js
+++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js
@@ -11,16 +11,8 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_internal_bounded_sort_compound;
const buckets = db['system.buckets.' + coll.getName()];
@@ -226,4 +218,3 @@ runTest({m: +1, t: +1});
runTest({m: +1, t: -1});
runTest({m: -1, t: +1});
runTest({m: -1, t: -1});
-})();
diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js
index f8e435c8563fb..0134232804226 100644
--- a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js
+++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js
@@ -11,16 +11,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_internal_bounded_sort_compound_mixed_types;
const buckets = db['system.buckets.' + coll.getName()];
@@ -163,4 +154,3 @@ runTest({m: +1, t: +1});
runTest({m: +1, t: -1});
runTest({m: -1, t: +1});
runTest({m: -1, t: -1});
-})();
diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js
index 6a588b5ada2e3..9731b1311c712 100644
--- a/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js
+++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js
@@ -11,16 +11,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
-
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_internal_bounded_sort_overflow;
const buckets = db['system.buckets.' + coll.getName()];
@@ -49,4 +40,3 @@ const result = buckets
// Make sure the result is in order.
assert.eq(result[0].t, docs[0].t);
assert.eq(result[1].t, docs[1].t);
-})();
diff --git a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
index f4467bde490ce..09fd754a835ee 100644
--- a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
+++ b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
@@ -4,8 +4,6 @@
* collection.
*
* @tags: [
- * # TODO (SERVER-73321): remove
- * assumes_against_mongod_not_mongos,
* # Explain of a resolved view must be executed by mongos.
* directly_against_shardsvrs_incompatible,
* # Time series geo functionality requires optimization.
@@ -20,10 +18,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_internal_bucket_geo_within;
coll.drop();
@@ -53,10 +48,6 @@ for (let collScanStage of collScanStages) {
"field": "loc"
}
};
- // TODO SERVER-60373 Fix duplicate predicates for sharded time-series collection
- if (FixtureHelpers.isSharded(bucketsColl)) {
- expectedPredicate = {$and: [expectedPredicate, expectedPredicate]};
- }
assert.docEq(expectedPredicate, collScanStage.filter, collScanStages);
}
@@ -319,5 +310,4 @@ assert.sameMembers(results, [
pipeline = [{$match: {loc: {$geoWithin: {$centerSphere: [[0, 80], 1], $center: [[0, 0], 5]}}}}];
err = assert.throws(() => coll.explain().aggregate(pipeline));
assert.eq(err.code, ErrorCodes.BadValue, err);
-}
-}());
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_lastpoint.js b/jstests/core/timeseries/timeseries_lastpoint.js
index f3ea1f5c09372..9433a2a0b780f 100644
--- a/jstests/core/timeseries/timeseries_lastpoint.js
+++ b/jstests/core/timeseries/timeseries_lastpoint.js
@@ -13,13 +13,17 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js");
-load("jstests/core/timeseries/libs/timeseries_agg_helpers.js");
-load("jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js");
-load("jstests/libs/analyze_plan.js");
+import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js";
+import {
+ createBoringCollections,
+ getMapInterestingValuesToEquivalentsStage,
+ createInterestingCollections,
+ expectDistinctScan,
+ expectCollScan,
+ expectIxscan,
+ testAllTimeMetaDirections,
+} from "jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js";
const testDB = TimeseriesAggTests.getTestDb();
assert.commandWorked(testDB.dropDatabase());
@@ -141,4 +145,3 @@ function getGroupStage(time, extraFields = []) {
];
});
}
-})();
diff --git a/jstests/core/timeseries/timeseries_lastpoint_top.js b/jstests/core/timeseries/timeseries_lastpoint_top.js
index f6a93b7091980..028d3bd90f494 100644
--- a/jstests/core/timeseries/timeseries_lastpoint_top.js
+++ b/jstests/core/timeseries/timeseries_lastpoint_top.js
@@ -15,14 +15,17 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js");
-load("jstests/core/timeseries/libs/timeseries_agg_helpers.js");
-load("jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js");
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js";
+import {
+ createBoringCollections,
+ getMapInterestingValuesToEquivalentsStage,
+ createInterestingCollections,
+ expectDistinctScan,
+ expectCollScan,
+ expectIxscan,
+ testAllTimeMetaDirections,
+} from "jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js";
const testDB = TimeseriesAggTests.getTestDb();
assert.commandWorked(testDB.dropDatabase());
@@ -30,7 +33,7 @@ assert.commandWorked(testDB.dropDatabase());
// TODO SERVER-73509 The test doesn't work yet, even though this feature flag is gone.
if (true /* previously guarded by featureFlagLastPointQuery */) {
jsTestLog("Skipping the test.");
- return;
+ quit();
}
/**
@@ -165,4 +168,3 @@ function getGroupStage({time, sortBy, n, extraFields = []}) {
]);
});
}
-})();
diff --git a/jstests/core/timeseries/timeseries_list_collections.js b/jstests/core/timeseries/timeseries_list_collections.js
index 318f50b8f3a00..47e582016716f 100644
--- a/jstests/core/timeseries/timeseries_list_collections.js
+++ b/jstests/core/timeseries/timeseries_list_collections.js
@@ -6,10 +6,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const testDB = db.getSiblingDB(jsTestName());
@@ -144,5 +141,4 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) {
collation: {locale: 'ja'},
expireAfterSeconds: NumberLong(100),
});
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_list_collections_filter_name.js b/jstests/core/timeseries/timeseries_list_collections_filter_name.js
index f95469caf4889..e8113b186df54 100644
--- a/jstests/core/timeseries/timeseries_list_collections_filter_name.js
+++ b/jstests/core/timeseries/timeseries_list_collections_filter_name.js
@@ -7,11 +7,6 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const timeFieldName = 'time';
const coll = db.timeseries_list_collections_filter_name;
@@ -36,5 +31,4 @@ const collectionOptions = [{
info: {readOnly: false},
}];
-assert.eq(collections, collectionOptions);
-})();
+assert.eq(collections, collectionOptions);
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_lookup.js b/jstests/core/timeseries/timeseries_lookup.js
index 62b4ffeeeb0a1..8d2a85558de83 100644
--- a/jstests/core/timeseries/timeseries_lookup.js
+++ b/jstests/core/timeseries/timeseries_lookup.js
@@ -7,12 +7,10 @@
* does_not_support_stepdowns,
* # We need a timeseries collection.
* requires_timeseries,
+ * references_foreign_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
@@ -245,5 +243,4 @@ TimeseriesTest.run((insert) => {
testFunc(collAOption, collBOption);
});
});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_match_pushdown.js b/jstests/core/timeseries/timeseries_match_pushdown.js
index 1b45895b46ea5..f044fecb41a82 100644
--- a/jstests/core/timeseries/timeseries_match_pushdown.js
+++ b/jstests/core/timeseries/timeseries_match_pushdown.js
@@ -9,10 +9,7 @@
* directly_against_shardsvrs_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_match_pushdown;
coll.drop();
@@ -409,5 +406,4 @@ runTest({
]
},
expectedDocs: [],
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
index bec8998f9d4d3..2f90599583b5f 100644
--- a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
+++ b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
@@ -8,10 +8,7 @@
* directly_against_shardsvrs_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const coll = db.timeseries_match_pushdown_with_project;
coll.drop();
@@ -126,5 +123,4 @@ runTest({
{[timeField]: aTime, a: 8, _id: 8},
{[timeField]: aTime, a: 9, _id: 9},
],
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_merge.js b/jstests/core/timeseries/timeseries_merge.js
index 041a88a24762f..75040c6c1e521 100644
--- a/jstests/core/timeseries/timeseries_merge.js
+++ b/jstests/core/timeseries/timeseries_merge.js
@@ -7,12 +7,10 @@
* does_not_support_stepdowns,
* # We need a timeseries collection.
* requires_timeseries,
+ * references_foreign_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries_agg_helpers.js");
+import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js";
const testDB = TimeseriesAggTests.getTestDb();
assert.commandWorked(testDB.dropDatabase());
@@ -107,5 +105,4 @@ let runMergeOnTestCase = () => {
runSimpleMergeTestCase();
runMergeOnErrorTestCase();
-runMergeOnTestCase();
-})();
+runMergeOnTestCase();
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_metadata.js b/jstests/core/timeseries/timeseries_metadata.js
index 53a5dac863eb8..b82afdb38c648 100644
--- a/jstests/core/timeseries/timeseries_metadata.js
+++ b/jstests/core/timeseries/timeseries_metadata.js
@@ -9,10 +9,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const collNamePrefix = 'timeseries_metadata_';
@@ -172,5 +169,4 @@ TimeseriesTest.run((insert) => {
{_id: 2, time: t[2], meta: {a: [2, 1, 3]}, x: 20},
{_id: 3, time: t[3], meta: {a: [2, 1, 3]}, x: 30},
]);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_metric_index_2dsphere.js b/jstests/core/timeseries/timeseries_metric_index_2dsphere.js
index 2fa470bbf59ed..5b7d8dd5e3bc3 100644
--- a/jstests/core/timeseries/timeseries_metric_index_2dsphere.js
+++ b/jstests/core/timeseries/timeseries_metric_index_2dsphere.js
@@ -16,15 +16,12 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -179,4 +176,3 @@ TimeseriesTest.run((insert) => {
assert.commandWorked(timeseriescoll.dropIndex(twoDSphereTimeseriesIndexSpec));
});
-})();
diff --git a/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js b/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js
index c997bb587890c..e7f17a200f877 100644
--- a/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js
+++ b/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js
@@ -8,17 +8,14 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/fixture_helpers.js");
if (!FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesMetricIndexes")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -168,4 +165,3 @@ TimeseriesTest.run((insert) => {
bucketIndexes = bucketsColl.getIndexes();
assert.eq(13 + numExtraIndexes, bucketIndexes.length, tojson(bucketIndexes));
});
-}());
diff --git a/jstests/core/timeseries/timeseries_metric_index_compound.js b/jstests/core/timeseries/timeseries_metric_index_compound.js
index 41e245bccab67..e5ae44d75f513 100644
--- a/jstests/core/timeseries/timeseries_metric_index_compound.js
+++ b/jstests/core/timeseries/timeseries_metric_index_compound.js
@@ -8,16 +8,13 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -203,4 +200,3 @@ TimeseriesTest.run((insert) => {
testBadIndexForData({[metaFieldName + ".loc2"]: "2d", a: 1});
testBadIndexForData({[metaFieldName + ".r"]: "hashed", a: 1});
});
-}());
diff --git a/jstests/core/timeseries/timeseries_metric_index_hashed.js b/jstests/core/timeseries/timeseries_metric_index_hashed.js
index fd9310fe65280..9de697e149e53 100644
--- a/jstests/core/timeseries/timeseries_metric_index_hashed.js
+++ b/jstests/core/timeseries/timeseries_metric_index_hashed.js
@@ -6,15 +6,12 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -46,5 +43,4 @@ TimeseriesTest.run((insert) => {
testIndex({x: 1, y: "hashed"});
testIndex({[`${metaFieldName}.tag`]: 1, x: "hashed"});
testIndex({x: 1, [`${metaFieldName}.tag`]: -1, y: "hashed"});
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_metric_index_wildcard.js b/jstests/core/timeseries/timeseries_metric_index_wildcard.js
index eba9648f88033..2520a0058adcb 100644
--- a/jstests/core/timeseries/timeseries_metric_index_wildcard.js
+++ b/jstests/core/timeseries/timeseries_metric_index_wildcard.js
@@ -6,15 +6,12 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -48,5 +45,4 @@ TimeseriesTest.run((insert) => {
testIndex({"$**": -1, x: 1});
testIndex({[`${metaFieldName}.tag`]: 1, "x.$**": 1});
testIndex({"$**": 1, [`${metaFieldName}.tag`]: -1});
-});
-}());
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_min_max.js b/jstests/core/timeseries/timeseries_min_max.js
index 4abf641df33b0..c2e5428f63b9a 100644
--- a/jstests/core/timeseries/timeseries_min_max.js
+++ b/jstests/core/timeseries/timeseries_min_max.js
@@ -10,10 +10,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const collNamePrefix = 'timeseries_min_max_';
@@ -126,5 +123,4 @@ TimeseriesTest.run((insert) => {
runTest({a: NumberInt(1)}, {a: NumberInt(1)}, {a: NumberLong(2)});
runTest({a: NumberDecimal(2.5)}, {a: NumberInt(1)}, {a: NumberDecimal(2.5)});
runTest({a: Number(0.5)}, {a: Number(0.5)}, {a: NumberDecimal(2.5)});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_out.js b/jstests/core/timeseries/timeseries_out.js
deleted file mode 100644
index 46beccd99ede0..0000000000000
--- a/jstests/core/timeseries/timeseries_out.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Verifies that time-series collections work as expected with $out.
- *
- *
- * @tags: [
- * # TimeseriesAggTests doesn't handle stepdowns.
- * does_not_support_stepdowns,
- * # We need a timeseries collection.
- * requires_timeseries,
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries_agg_helpers.js");
-
-const testDB = TimeseriesAggTests.getTestDb();
-assert.commandWorked(testDB.dropDatabase());
-const numHosts = 10;
-const numIterations = 20;
-
-let [inColl, observerInColl] = TimeseriesAggTests.prepareInputCollections(numHosts, numIterations);
-
-// Gets the expected results from non time-series observer input collection.
-let expectedResults =
- TimeseriesAggTests.getOutputAggregateResults(observerInColl, [{$out: "observer_out"}]);
-
-// Gets the actual results from time-series input collection.
-let actualResults = TimeseriesAggTests.getOutputAggregateResults(inColl, [{$out: "out"}]);
-
-// Verifies that the number of measurements is same as expected.
-assert.eq(actualResults.length, expectedResults.length, actualResults);
-
-// Verifies that every measurement is same as expected.
-for (var i = 0; i < expectedResults.length; ++i) {
- assert.eq(actualResults[i], expectedResults[i], actualResults);
-}
-})();
diff --git a/jstests/core/timeseries/timeseries_out_non_sharded.js b/jstests/core/timeseries/timeseries_out_non_sharded.js
new file mode 100644
index 0000000000000..5654c5e5791ea
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_out_non_sharded.js
@@ -0,0 +1,167 @@
+/**
+ * Verifies that $out writes to a time-series collection from an unsharded collection.
+ * There is a test for sharded source collections in jstests/sharding/timeseries_out_sharded.js.
+ *
+ * @tags: [
+ * references_foreign_collection,
+ * # TimeseriesAggTests doesn't handle stepdowns.
+ * does_not_support_stepdowns,
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * requires_fcv_71,
+ * featureFlagAggOutTimeseries,
+ * ]
+ */
+import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js";
+load("jstests/libs/fixture_helpers.js");  // For 'FixtureHelpers'.
+
+const numHosts = 10;
+const numIterations = 20;
+
+const testDB = TimeseriesAggTests.getTestDb();
+const dbName = testDB.getName();
+assert.commandWorked(testDB.dropDatabase());
+const targetCollName = "out_time";
+
+let [inColl, observerInColl] =
+ TimeseriesAggTests.prepareInputCollections(numHosts, numIterations, true);
+
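+// Runs 'timeseries' against the time-series input collection and 'observer' against the plain
+// observer collection, compares the outputs, and verifies the target collection's time-series
+// options (and, when 'drop' is false, that its original index is preserved).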
+function runTest({
+ observer: observerPipeline,
+ timeseries: timeseriesPipeline,
+ drop: shouldDrop = true,
+ value: valueToCheck = null
+}) {
+ let expectedTSOptions = null;
+ if (!shouldDrop) {
+ // To test if an index is preserved by $out when replacing an existing collection.
+ assert.commandWorked(testDB[targetCollName].createIndex({usage_guest: 1}));
+ // To test if $out preserves the original collection options.
+ let collections = testDB.getCollectionInfos({name: targetCollName});
+ assert.eq(collections.length, 1, collections);
+ expectedTSOptions = collections[0]["options"]["timeseries"];
+ } else {
+ expectedTSOptions = timeseriesPipeline[0]["$out"]["timeseries"];
+ }
+
+ // Gets the expected results from a non time-series observer input collection.
+ const expectedResults = TimeseriesAggTests.getOutputAggregateResults(
+ observerInColl, observerPipeline, null, shouldDrop);
+
+ // Gets the actual results from a time-series input collection.
+ const actualResults =
+ TimeseriesAggTests.getOutputAggregateResults(inColl, timeseriesPipeline, null, shouldDrop);
+
+    // Verifies that the number of measurements is the same as expected.
+ TimeseriesAggTests.verifyResults(actualResults, expectedResults);
+ if (valueToCheck) {
+ for (var i = 0; i < expectedResults.length; ++i) {
+ assert.eq(actualResults[i], {"time": valueToCheck}, actualResults);
+ }
+ }
+
+ let collections = testDB.getCollectionInfos({name: targetCollName});
+ assert.eq(collections.length, 1, collections);
+
+    // Verifies that a time-series collection was not created, if none is expected.
+ if (!expectedTSOptions) {
+ assert(!collections[0]["options"]["timeseries"], collections);
+ return;
+ }
+
+ // Verifies the time-series options are correct, if a time-series collection is expected.
+ let actualOptions = collections[0]["options"]["timeseries"];
+ for (let option in expectedTSOptions) {
+ // Must loop through each option, since 'actualOptions' will contain default fields and
+ // values that do not exist in 'expectedTSOptions'.
+ assert.eq(expectedTSOptions[option], actualOptions[option], actualOptions);
+ }
+
+ // Verifies the original index is maintained, if $out is replacing an existing collection.
+ if (!shouldDrop) {
+ let indexSpecs = testDB[targetCollName].getIndexes();
+ assert.eq(indexSpecs.filter(index => index.name == "usage_guest_1").length, 1);
+ }
+}
+
+// Tests that $out works with a source time-series collection writing to a non-timeseries
+// collection.
+runTest({observer: [{$out: "observer_out"}], timeseries: [{$out: targetCollName}]});
+
+// Tests that $out creates a time-series collection when the collection does not exist.
+let timeseriesPipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName, dbName, {timeField: "time", metaField: "tags"});
+runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline});
+
+// Test that $out can replace an existing time-series collection without the 'timeseries' option.
+// Change an option in the existing time-series collection.
+assert.commandWorked(testDB.runCommand({collMod: targetCollName, expireAfterSeconds: 360}));
+// Run the $out stage.
+timeseriesPipeline = [{$out: targetCollName}];
+runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline, drop: false});
+
+// Test that $out can replace an existing time-series collection with the 'timeseries' option.
+let newDate = new Date('1999-09-30T03:24:00');
+let observerPipeline = [{$set: {"time": newDate}}, {$out: "observer_out"}];
+timeseriesPipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName, dbName, {timeField: "time", metaField: "tags"}, {$set: {"time": newDate}});
+// Run the $out stage and confirm all the documents have the new value.
+runTest({observer: observerPipeline, timeseries: timeseriesPipeline, drop: false, value: newDate});
+
+// Test $out to time-series succeeds with a non-existent database.
+const destDB = testDB.getSiblingDB("outDifferentDB");
+assert.commandWorked(destDB.dropDatabase());
+timeseriesPipeline =
+ TimeseriesAggTests.generateOutPipeline(targetCollName, destDB.getName(), {timeField: "time"});
+// TODO SERVER-75856 remove this conditional.
+if (FixtureHelpers.isMongos(testDB)) { // this is not supported in mongos.
+ assert.throwsWithCode(() => inColl.aggregate(timeseriesPipeline), ErrorCodes.NamespaceNotFound);
+} else {
+ inColl.aggregate(timeseriesPipeline);
+ assert.eq(300, destDB[targetCollName].find().itcount());
+}
+
+// Tests that an error is raised when trying to create a time-series collection from a non
+// time-series collection.
+let pipeline = TimeseriesAggTests.generateOutPipeline("observer_out", dbName, {timeField: "time"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268700);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268700);
+
+// Tests that an error is raised for invalid timeseries options.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName, dbName, {timeField: "time", invalidField: "invalid"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 40415);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 40415);
+
+// Tests that an error is raised if the user changes the 'timeField'.
+pipeline =
+ TimeseriesAggTests.generateOutPipeline(targetCollName, dbName, {timeField: "usage_guest_nice"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes the 'metaField'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName, dbName, {timeField: "time", metaField: "usage_guest_nice"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes 'bucketMaxSpanSeconds'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName,
+ dbName,
+ {timeField: "time", bucketMaxSpanSeconds: 330, bucketRoundingSeconds: 330});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes 'granularity'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+ targetCollName, dbName, {timeField: "time", granularity: "minutes"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if a conflicting view exists.
+if (!FixtureHelpers.isMongos(testDB)) { // can not shard a view.
+ assert.commandWorked(testDB.createCollection("view_out", {viewOn: "out"}));
+ pipeline = TimeseriesAggTests.generateOutPipeline("view_out", dbName, {timeField: "time"});
+ assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268703);
+ assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268703);
+}
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_out_of_order.js b/jstests/core/timeseries/timeseries_out_of_order.js
index ff84239377373..6cea3c5c87e8d 100644
--- a/jstests/core/timeseries/timeseries_out_of_order.js
+++ b/jstests/core/timeseries/timeseries_out_of_order.js
@@ -10,10 +10,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/core/timeseries/libs/timeseries.js');
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const collNamePrefix = 'timeseries_out_of_order_';
@@ -67,5 +64,4 @@ TimeseriesTest.run((insert) => {
assert.eq(buckets[1].control.min[timeFieldName], times[2]);
assert.eq(buckets[1].control.max[timeFieldName], times[2]);
});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_partial_index_opt.js b/jstests/core/timeseries/timeseries_partial_index_opt.js
new file mode 100644
index 0000000000000..40c649bb61f44
--- /dev/null
+++ b/jstests/core/timeseries/timeseries_partial_index_opt.js
@@ -0,0 +1,55 @@
+/**
+ * Test partial index optimization on a time-series collection.
+ * If a query expression is covered by the partial index filter, it is removed from the filter in
+ * the fetch stage.
+ *
+ * @tags: [
+ * # Explain of a resolved view must be executed by mongos.
+ * directly_against_shardsvrs_incompatible,
+ * # Refusing to run a test that issues a command with explain because it may return
+ * # incomplete results if interrupted by a stepdown.
+ * does_not_support_stepdowns,
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * requires_fcv_70,
+ * ]
+ */
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
+
+const coll = db.timeseries_partial_index_opt;
+
+coll.drop();
+assert.commandWorked(
+ db.createCollection(coll.getName(), {timeseries: {timeField: "time", metaField: "tag"}}));
+
+assert.commandWorked(coll.insertMany([
+ {_id: 0, time: new Date("2021-07-29T07:46:38.746Z"), tag: 2, a: 5},
+ {_id: 1, time: new Date("2021-08-29T00:15:38.001Z"), tag: 1, a: 5, b: 8},
+ {_id: 2, time: new Date("2021-11-29T12:20:34.821Z"), tag: 1, a: 7, b: 12},
+ {_id: 3, time: new Date("2021-03-09T07:29:34.201Z"), tag: 2, a: 2, b: 7},
+ {_id: 4, time: new Date("2021-10-09T07:29:34.201Z"), tag: 4, a: 8, b: 10}
+]));
+
+// Checks that the plan uses a partial index scan with 'indexName' and that the filter of the
+// fetch stage does not contain the field from the partial filter expression.
+function checkIndexScanAndFilter(coll, predicate, indexName, filterField) {
+ const explain = coll.find(predicate).explain();
+ const scan = getAggPlanStage(explain, "IXSCAN");
+ assert.eq(scan.indexName, indexName, scan);
+
+ const fetch = getAggPlanStage(explain, "FETCH");
+ if (fetch !== null && fetch.hasOwnProperty("filter")) {
+ const filter = fetch.filter;
+ assert(!filter.hasOwnProperty(filterField),
+ "Unexpected field " + filterField + " in fetch filter: " + tojson(filter));
+ }
+}
+
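+// The predicate on 'tag' is covered by the partial filter expression of 'time_1_tag', so the
+// fetch stage should not need to re-apply it.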
+const timeDate = ISODate("2021-10-01 00:00:00.000Z");
+assert.commandWorked(
+ coll.createIndex({time: 1}, {name: "time_1_tag", partialFilterExpression: {tag: {$gt: 1}}}));
+checkIndexScanAndFilter(coll, {time: {$gte: timeDate}, tag: {$gt: 1}}, "time_1_tag", "tag");
+
+assert.commandWorked(
+ coll.createIndex({tag: 1}, {name: "tag_1_b", partialFilterExpression: {b: {$gte: 10}}}));
+checkIndexScanAndFilter(coll, {tag: {$gt: 1}, b: {$gte: 10}}, "tag_1_b", "b");
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_predicates.js b/jstests/core/timeseries/timeseries_predicates.js
index d43f20a49e765..f356979bbcbb0 100644
--- a/jstests/core/timeseries/timeseries_predicates.js
+++ b/jstests/core/timeseries/timeseries_predicates.js
@@ -9,11 +9,6 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const coll = db.timeseries_predicates_normal;
const tsColl = db.timeseries_predicates_timeseries;
coll.drop();
@@ -380,5 +375,4 @@ checkAllBucketings({"mt.a": {$size: 1}}, [
{mt: {a: [{b: 2}]}},
{mt: {a: [{b: 3}]}},
{mt: {a: [{b: 2}, {b: 3}]}},
-]);
-})();
+]);
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_predicates_with_projections.js b/jstests/core/timeseries/timeseries_predicates_with_projections.js
index 65b4ff155cb0d..c162204d1d94a 100644
--- a/jstests/core/timeseries/timeseries_predicates_with_projections.js
+++ b/jstests/core/timeseries/timeseries_predicates_with_projections.js
@@ -9,11 +9,6 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const coll = db.timeseries_predicates_with_projections_normal;
const tsColl = db.timeseries_predicates_with_projections_timeseries;
coll.drop();
@@ -63,5 +58,4 @@ checkPredicateResult({y: 1}, {x: {$lt: 0}}, [
checkPredicateResult({x: 1}, {"mm.x": {$lt: 0}}, [
{mm: {x: -1}},
{mm: {x: 1}},
-]);
-})();
+]);
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_project.js b/jstests/core/timeseries/timeseries_project.js
index 85093904aba76..809d737f59a5b 100644
--- a/jstests/core/timeseries/timeseries_project.js
+++ b/jstests/core/timeseries/timeseries_project.js
@@ -7,11 +7,6 @@
* requires_fcv_62,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const coll = db.timeseries_project;
coll.drop();
assert.commandWorked(
@@ -128,5 +123,4 @@ pipeline = [{$project: {a: 1, _id: 0}}, {$project: {newMeta: "$x"}}];
tsDoc = tsColl.aggregate(pipeline).toArray();
regDoc = regColl.aggregate(pipeline).toArray();
assert.docEq(regDoc, tsDoc);
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
index 5b4c5d2c8799d..ba515eb1a08eb 100644
--- a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
+++ b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js
@@ -11,16 +11,14 @@
* assumes_read_preference_unchanged,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/fixture_helpers.js"); // For isSharded.
if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- return;
+ quit();
}
const testDB = db.getSiblingDB(jsTestName());
@@ -216,6 +214,45 @@ const expectToReopenArchivedBuckets = function() {
jsTestLog("Exiting expectToReopenArchivedBuckets.");
}();
+// TODO SERVER-77454: Investigate re-enabling this.
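+// Note that, unlike the other cases in this test, the function below is defined without being
+// immediately invoked while the investigation above is pending.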
+const expectToReopenCompressedBuckets = function() {
+ if (!FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ return;
+ }
+
+ jsTestLog("Entering expectToReopenCompressedBuckets...");
+ resetCollection();
+
+ let initialMeasurements = [];
+ for (let i = 0; i < 5; ++i) {
+ initialMeasurements.push({
+ [timeField]: ISODate("2022-08-26T19:19:00Z"),
+ [metaField]: "ReopenedBucket1",
+ });
+ }
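+    // 'forward' is one day after the initial measurements; 'backward' shares their timestamp.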
+ const forward = {
+ [timeField]: ISODate("2022-08-27T19:19:00Z"),
+ [metaField]: "ReopenedBucket1",
+ };
+ const backward = {
+ [timeField]: ISODate("2022-08-26T19:19:00Z"),
+ [metaField]: "ReopenedBucket1",
+ };
+
+ for (let i = 0; i < initialMeasurements.length; ++i) {
+ checkIfBucketReopened(
+ initialMeasurements[i], /* willCreateBucket */ i == 0, /* willReopenBucket */ false);
+ }
+    // The later 'forward' measurement will open a new bucket, and close and compress the old one.
+ checkIfBucketReopened(forward, /* willCreateBucket */ true, /* willReopenBucket */ false);
+ assert.eq(1, bucketsColl.find({"control.version": 2}).toArray().length);
+
+    // The earlier 'backward' measurement is expected to reopen the compressed bucket.
+ checkIfBucketReopened(backward, /* willCreateBucket */ false, /* willReopenBucket */ true);
+
+ jsTestLog("Exiting expectToReopenCompressedBuckets.");
+};
+
const failToReopenNonSuitableBuckets = function() {
jsTestLog("Entering failToReopenNonSuitableBuckets...");
resetCollection();
@@ -737,4 +774,3 @@ const reopenBucketsWhenSuitableIndexExistsNoMeta = function() {
}();
coll.drop();
-})();
diff --git a/jstests/core/timeseries/timeseries_resume_after.js b/jstests/core/timeseries/timeseries_resume_after.js
index e26b728a0e028..be1c5716ecc32 100644
--- a/jstests/core/timeseries/timeseries_resume_after.js
+++ b/jstests/core/timeseries/timeseries_resume_after.js
@@ -13,10 +13,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const timeFieldName = "time";
@@ -94,5 +91,24 @@ TimeseriesTest.run((insert) => {
resumeToken = res.cursor.postBatchResumeToken;
jsTestLog("Got resume token " + tojson(resumeToken));
-});
-})();
+
+ // Test that '$_resumeAfter' fails if the recordId is Long.
+ assert.commandFailedWithCode(db.runCommand({
+ find: bucketsColl.getName(),
+ filter: {},
+ $_requestResumeToken: true,
+ $_resumeAfter: {'$recordId': NumberLong(10)},
+ hint: {$natural: 1}
+ }),
+ 7738600);
+
+ // Test that '$_resumeAfter' fails if querying the time-series view.
+ assert.commandFailedWithCode(db.runCommand({
+ find: coll.getName(),
+ filter: {},
+ $_requestResumeToken: true,
+ $_resumeAfter: {'$recordId': BinData(5, '1234')},
+ hint: {$natural: 1}
+ }),
+ ErrorCodes.InvalidPipelineOperator);
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_show_record_id.js b/jstests/core/timeseries/timeseries_show_record_id.js
index 681933439184f..1c569d2d37ed5 100644
--- a/jstests/core/timeseries/timeseries_show_record_id.js
+++ b/jstests/core/timeseries/timeseries_show_record_id.js
@@ -6,10 +6,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const timeFieldName = "time";
@@ -58,5 +55,4 @@ TimeseriesTest.run((insert) => {
const bucketsColl = db.getCollection("system.buckets." + coll.getName());
checkRecordId(bucketsColl.find().showRecordId().toArray());
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_simple.js b/jstests/core/timeseries/timeseries_simple.js
index c86b2c2575087..43327d610314f 100644
--- a/jstests/core/timeseries/timeseries_simple.js
+++ b/jstests/core/timeseries/timeseries_simple.js
@@ -9,10 +9,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const coll = db.timeseries_simple;
@@ -116,5 +113,4 @@ TimeseriesTest.run((insert) => {
bucketDoc.data[key],
'invalid bucket data for field ' + key + ': ' + tojson(bucketDoc));
});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_sparse.js b/jstests/core/timeseries/timeseries_sparse.js
index 1362fc6eac48c..adf243ccc8d23 100644
--- a/jstests/core/timeseries/timeseries_sparse.js
+++ b/jstests/core/timeseries/timeseries_sparse.js
@@ -9,10 +9,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const collNamePrefix = 'timeseries_sparse_';
@@ -90,5 +87,4 @@ TimeseriesTest.run((insert) => {
{_id: 2, time: t[2], b: 22, c: 20},
{_id: 3, time: t[3], c: 33, d: 30},
]);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_sparse_index.js b/jstests/core/timeseries/timeseries_sparse_index.js
index 16b2fef370422..c827f6c62199d 100644
--- a/jstests/core/timeseries/timeseries_sparse_index.js
+++ b/jstests/core/timeseries/timeseries_sparse_index.js
@@ -11,16 +11,13 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled.");
- return;
+ quit();
}
TimeseriesTest.run((insert) => {
@@ -137,4 +134,3 @@ TimeseriesTest.run((insert) => {
{"meta.abc": 1, "control.max.tm": -1, "control.min.tm": -1},
3);
});
-}());
diff --git a/jstests/core/timeseries/timeseries_special_indexes_metadata.js b/jstests/core/timeseries/timeseries_special_indexes_metadata.js
index 5b6b150e45927..678f6f40d678f 100644
--- a/jstests/core/timeseries/timeseries_special_indexes_metadata.js
+++ b/jstests/core/timeseries/timeseries_special_indexes_metadata.js
@@ -14,11 +14,8 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fixture_helpers.js");
TimeseriesTest.run((insert) => {
@@ -378,4 +375,3 @@ TimeseriesTest.run((insert) => {
assert(planWildcardStage.multiKeyPaths.hasOwnProperty("meta.d.zip"),
"Index has wrong multikey paths after insert; plan: " + tojson(planWildcardStage));
});
-})();
diff --git a/jstests/core/timeseries/timeseries_streaming_group.js b/jstests/core/timeseries/timeseries_streaming_group.js
index 89dfbe95a7b49..60d52c187848e 100644
--- a/jstests/core/timeseries/timeseries_streaming_group.js
+++ b/jstests/core/timeseries/timeseries_streaming_group.js
@@ -10,10 +10,7 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/fail_point_util.js"); // For configureFailPoint
const ts = db.timeseries_streaming_group;
@@ -64,6 +61,33 @@ for (let i = 0; i < numTimes; i++) {
assert.commandWorked(ts.insert(documents));
assert.commandWorked(coll.insert(documents));
+// Incorrect use of $_internalStreamingGroup should return an error.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: "timeseires_streaming_group_regular_collection",
+ pipeline: [{
+ $_internalStreamingGroup: {
+ _id: {symbol: "$symbol", time: "$time"},
+ count: {$sum: 1},
+ $monotonicIdFields: ["price"]
+ }
+ }],
+ cursor: {},
+}),
+ 7026705);
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: "timeseires_streaming_group_regular_collection",
+ pipeline: [{$_internalStreamingGroup: {_id: null, count: {$sum: 1}}}],
+ cursor: {},
+}),
+ 7026702);
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: "timeseires_streaming_group_regular_collection",
+ pipeline:
+ [{$_internalStreamingGroup: {_id: null, count: {$sum: 1}, $monotonicIdFields: ["_id"]}}],
+ cursor: {},
+}),
+ 7026708);
+
const runTest = function(pipeline, expectedMonotonicIdFields) {
const explain = assert.commandWorked(ts.explain().aggregate(pipeline));
const streamingGroupStage = getAggPlanStage(explain, "$_internalStreamingGroup");
@@ -128,4 +152,3 @@ runTest(
{$sort: {_id: 1}}
],
["_id"]);
-})();
diff --git a/jstests/core/timeseries/timeseries_union_with.js b/jstests/core/timeseries/timeseries_union_with.js
index f32fdd8289258..ab2437b1ef33d 100644
--- a/jstests/core/timeseries/timeseries_union_with.js
+++ b/jstests/core/timeseries/timeseries_union_with.js
@@ -7,12 +7,10 @@
* does_not_support_stepdowns,
* # We need a timeseries collection.
* requires_timeseries,
+ * references_foreign_collection,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
@@ -102,5 +100,4 @@ TimeseriesTest.run((insert) => {
testFunc(collAOption, collBOption);
});
});
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_update.js b/jstests/core/timeseries/timeseries_update.js
index 9eca3ee44fd2d..a257eaa969fdc 100644
--- a/jstests/core/timeseries/timeseries_update.js
+++ b/jstests/core/timeseries/timeseries_update.js
@@ -10,24 +10,10 @@
* requires_timeseries,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/libs/fixture_helpers.js");
-if (FixtureHelpers.isMongos(db) &&
- !TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) {
- jsTestLog("Skipping test because the sharded time-series feature flag is disabled");
- return;
-}
-
-if (FixtureHelpers.isMongos(db) &&
- !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) {
- jsTestLog(
- "Skipping test because the sharded time-series updates and deletes feature flag is disabled");
- return;
-}
+const arbitraryUpdatesEnabled = TimeseriesTest.arbitraryUpdatesEnabled(db);
const timeFieldName = "time";
const metaFieldName = "tag";
@@ -95,13 +81,15 @@ TimeseriesTest.run((insert) => {
const arrayDoc3 = {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: [3, 6, 10]};
/************************************ multi:false updates ************************************/
- testUpdate({
- initialDocList: [doc1],
- updateList: [{q: {[metaFieldName]: {b: "B"}}, u: {$set: {[metaFieldName]: {b: "C"}}}}],
- resultDocList: [doc1],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [{q: {[metaFieldName]: {b: "B"}}, u: {$set: {[metaFieldName]: {b: "C"}}}}],
+ resultDocList: [doc1],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
/************************************ multi:true updates *************************************/
/************************** Tests updating with an update document ***************************/
@@ -206,58 +194,60 @@ TimeseriesTest.run((insert) => {
});
// Query on a field that is not the metaField.
- testUpdate({
- initialDocList: [doc1],
- updateList: [{
- q: {measurement: "cpu"},
- u: {$set: {[metaFieldName]: {c: "C"}}},
- multi: true,
- }],
- resultDocList: [doc1],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
-
- // Query on the metaField and modify a field that is not the metaField.
- testUpdate({
- initialDocList: [doc2],
- updateList: [{
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$set: {f2: "f2"}},
- multi: true,
- }],
- resultDocList: [doc2],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
-
- // Query on the metaField and a field that is not the metaField.
- testUpdate({
- initialDocList: [doc1],
- updateList: [
- {
- q: {[metaFieldName]: {a: "A", b: "B"}, measurement: "cpu"},
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [{
+ q: {measurement: "cpu"},
u: {$set: {[metaFieldName]: {c: "C"}}},
multi: true,
- },
- ],
- resultDocList: [doc1],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ }],
+ resultDocList: [doc1],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Query on the metaField and modify the metaField and fields that are not the metaField.
- testUpdate({
- initialDocList: [doc2],
- updateList: [{
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$set: {[metaFieldName]: {e: "E"}, f3: "f3"}, $inc: {f2: 3}, $unset: {f1: ""}},
- multi: true,
- }],
- resultDocList: [doc2],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Query on the metaField and modify a field that is not the metaField.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [{
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$set: {f2: "f2"}},
+ multi: true,
+ }],
+ resultDocList: [doc2],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+
+ // Query on the metaField and a field that is not the metaField.
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [
+ {
+ q: {[metaFieldName]: {a: "A", b: "B"}, measurement: "cpu"},
+ u: {$set: {[metaFieldName]: {c: "C"}}},
+ multi: true,
+ },
+ ],
+ resultDocList: [doc1],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+
+ // Query on the metaField and modify the metaField and fields that are not the metaField.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [{
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$set: {[metaFieldName]: {e: "E"}, f3: "f3"}, $inc: {f2: 3}, $unset: {f1: ""}},
+ multi: true,
+ }],
+ resultDocList: [doc2],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Compound query on the metaField using dot notation and modify the metaField.
testUpdate({
@@ -301,17 +291,19 @@ TimeseriesTest.run((insert) => {
});
// Query on a field that is not the metaField using dot notation and modify the metaField.
- testUpdate({
- initialDocList: [doc1],
- updateList: [{
- q: {"measurement.A": "cpu"},
- u: {$set: {[metaFieldName]: {c: "C"}}},
- multi: true,
- }],
- resultDocList: [doc1],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [{
+ q: {"measurement.A": "cpu"},
+ u: {$set: {[metaFieldName]: {c: "C"}}},
+ multi: true,
+ }],
+ resultDocList: [doc1],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Query with an empty document (i.e update all documents in the collection).
testUpdate({
@@ -346,17 +338,19 @@ TimeseriesTest.run((insert) => {
});
// Rename the metaField.
- testUpdate({
- initialDocList: [doc1, doc2, doc4],
- updateList: [{
- q: {[metaFieldName + ".a"]: "A"},
- u: {$rename: {[metaFieldName]: "Z"}},
- multi: true,
- }],
- resultDocList: [doc1, doc2, doc4],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1, doc2, doc4],
+ updateList: [{
+ q: {[metaFieldName + ".a"]: "A"},
+ u: {$rename: {[metaFieldName]: "Z"}},
+ multi: true,
+ }],
+ resultDocList: [doc1, doc2, doc4],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Rename a subfield of the metaField.
testUpdate({
@@ -372,17 +366,19 @@ TimeseriesTest.run((insert) => {
});
// Rename a subfield of the metaField to something not in the metaField.
- testUpdate({
- initialDocList: [doc1, doc2, doc4],
- updateList: [{
- q: {[metaFieldName + ".a"]: "A"},
- u: {$rename: {[metaFieldName + ".a"]: "notMetaField.a"}},
- multi: true,
- }],
- resultDocList: [doc1, doc2, doc4],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1, doc2, doc4],
+ updateList: [{
+ q: {[metaFieldName + ".a"]: "A"},
+ u: {$rename: {[metaFieldName + ".a"]: "notMetaField.a"}},
+ multi: true,
+ }],
+ resultDocList: [doc1, doc2, doc4],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// For all documents that have at least one 2 in its metaField array, update the first 2
// to be 100 using the positional $ operator.
@@ -534,251 +530,253 @@ TimeseriesTest.run((insert) => {
});
// Multiple updates, ordered: Query on the metaField and on a field that is not the metaField.
- testUpdate({
- initialDocList: [doc1],
- updateList: [
- {
- q: {[metaFieldName]: {a: "A", b: "B"}},
- u: {$set: {[metaFieldName]: {c: "C", d: 1}}},
- multi: true,
- },
- {
- q: {measurement: "cpu", [metaFieldName + ".d"]: 1},
- u: {$set: {[metaFieldName + ".c"]: "CC"}},
- multi: true,
- }
- ],
- resultDocList: [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C", d: 1}}],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [
+ {
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: {$set: {[metaFieldName]: {c: "C", d: 1}}},
+ multi: true,
+ },
+ {
+ q: {measurement: "cpu", [metaFieldName + ".d"]: 1},
+ u: {$set: {[metaFieldName + ".c"]: "CC"}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C", d: 1}}],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, ordered: Query on the metaField and modify the metaField and a field that
- // is not the metaField using dot notation.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- }
- ],
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 8},
- f: [{"k": "K", "v": "V"}],
- }],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, ordered: Query on the metaField and modify the metaField and a field
+ // that is not the metaField using dot notation.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 8},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, ordered: Query on the metaField and modify a field that is not the
- // metaField using dot notation.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- }
- ],
- resultDocList: [doc2],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, ordered: Query on the metaField and modify a field that is not the
+ // metaField using dot notation.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ }
+ ],
+ resultDocList: [doc2],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, unordered: Query on the metaField and modify a field that is not the
- // metaField using dot notation.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- }
- ],
- ordered: false,
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 8},
- f: [{"k": "K", "v": "V"}],
- }],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, unordered: Query on the metaField and modify a field that is not the
+ // metaField using dot notation.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ }
+ ],
+ ordered: false,
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 8},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and the
- // metaField. Only the first update should succeed.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$inc: {[metaFieldName + ".d"]: 7}},
- multi: true,
- }
- ],
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 8},
- f: [{"k": "K", "v": "V"}],
- }],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and
+ // the metaField. Only the first update should succeed.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$inc: {[metaFieldName + ".d"]: 7}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 8},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and the
- // metaField. The first and last updates should succeed.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$inc: {[metaFieldName + ".d"]: 7}},
- multi: true,
- }
- ],
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 15},
- f: [{"k": "K", "v": "V"}],
- }],
- ordered: false,
- n: 2,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and
+ // the metaField. The first and last updates should succeed.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$inc: {[metaFieldName + ".d"]: 7}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 15},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ ordered: false,
+ n: 2,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, unordered: Query on the metaField and modify a field that is not the
- // metaField using dot notation.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- }
- ],
- ordered: false,
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 8},
- f: [{"k": "K", "v": "V"}],
- }],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, unordered: Query on the metaField and modify a field that is not the
+ // metaField using dot notation.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ }
+ ],
+ ordered: false,
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 8},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and the
- // metaField. Only the first update should succeed.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$inc: {[metaFieldName + ".d"]: 7}},
- multi: true,
- }
- ],
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 8},
- f: [{"k": "K", "v": "V"}],
- }],
- n: 1,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and
+ // the metaField. Only the first update should succeed.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$inc: {[metaFieldName + ".d"]: 7}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 8},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ n: 1,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and the
- // metaField.
- testUpdate({
- initialDocList: [doc2],
- updateList: [
- {
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {$inc: {[metaFieldName + ".d"]: 6}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$set: {"f1.0": "f2"}},
- multi: true,
- },
- {
- q: {[metaFieldName]: {c: "C", d: 8}},
- u: {$inc: {[metaFieldName + ".d"]: 7}},
- multi: true,
- }
- ],
- resultDocList: [{
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C", d: 15},
- f: [{"k": "K", "v": "V"}],
- }],
- ordered: false,
- n: 2,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and
+ // the metaField.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [
+ {
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {$inc: {[metaFieldName + ".d"]: 6}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$set: {"f1.0": "f2"}},
+ multi: true,
+ },
+ {
+ q: {[metaFieldName]: {c: "C", d: 8}},
+ u: {$inc: {[metaFieldName + ".d"]: 7}},
+ multi: true,
+ }
+ ],
+ resultDocList: [{
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C", d: 15},
+ f: [{"k": "K", "v": "V"}],
+ }],
+ ordered: false,
+ n: 2,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Multiple unordered updates on multiple matching documents.
testUpdate({
@@ -845,71 +843,78 @@ TimeseriesTest.run((insert) => {
n: 2
});
- // Query for documents using $jsonSchema with the metaField in dot notation required.
- testUpdate({
- initialDocList: [doc1, doc2, doc3],
- updateList: [{
- q: {"$jsonSchema": {"required": [metaFieldName + ".a"]}},
- u: {$set: {[metaFieldName]: "a"}},
- multi: true
- }],
- resultDocList: [doc1, doc2, doc3],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ // Query for documents using $jsonSchema with the metaField in dot notation required.
+ testUpdate({
+ initialDocList: [doc1, doc2, doc3],
+ updateList: [{
+ q: {"$jsonSchema": {"required": [metaFieldName + ".a"]}},
+ u: {$set: {[metaFieldName]: "a"}},
+ multi: true
+ }],
+ resultDocList: [doc1, doc2, doc3],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Query for documents using $jsonSchema with a field that is not the metaField required.
- testUpdate({
- initialDocList: [doc1, doc2, doc3],
- updateList: [{
- q: {"$jsonSchema": {"required": [metaFieldName, timeFieldName]}},
- u: {$set: {[metaFieldName]: "a"}},
- multi: true
- }],
- resultDocList: [doc1, doc2, doc3],
- n: 0,
- failCode: ErrorCodes.InvalidOptions
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1, doc2, doc3],
+ updateList: [{
+ q: {"$jsonSchema": {"required": [metaFieldName, timeFieldName]}},
+ u: {$set: {[metaFieldName]: "a"}},
+ multi: true
+ }],
+ resultDocList: [doc1, doc2, doc3],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions
+ });
+ }
const nestedMetaObj =
{_id: 6, [timeFieldName]: dateTime, [metaFieldName]: {[metaFieldName]: "A"}};
-
- // Query for documents using $jsonSchema with the metaField required and a required subfield of
- // the metaField with the same name as the metaField.
- testUpdate({
- initialDocList: [doc1, nestedMetaObj],
- updateList: [{
- q: {
- "$jsonSchema": {
- "required": [metaFieldName],
- "properties": {[metaFieldName]: {"required": [metaFieldName]}}
- }
- },
- u: {$set: {[metaFieldName]: "a"}},
- multi: true
- }],
- resultDocList: [doc1, {_id: 6, [timeFieldName]: dateTime, [metaFieldName]: "a"}],
- n: 1
- });
+ if (!arbitraryUpdatesEnabled) {
+ // Query for documents using $jsonSchema with the metaField required and a required
+ // subfield of the metaField with the same name as the metaField.
+ testUpdate({
+ initialDocList: [doc1, nestedMetaObj],
+ updateList: [{
+ q: {
+ "$jsonSchema": {
+ "required": [metaFieldName],
+ "properties": {[metaFieldName]: {"required": [metaFieldName]}}
+ }
+ },
+ u: {$set: {[metaFieldName]: "a"}},
+ multi: true
+ }],
+ resultDocList: [doc1, {_id: 6, [timeFieldName]: dateTime, [metaFieldName]: "a"}],
+ n: 1
+ });
+ }
// Query for documents using $jsonSchema with the metaField required and an optional field that
// is not the metaField.
- testUpdate({
- initialDocList: [doc1, nestedMetaObj],
- updateList: [{
- q: {
- "$jsonSchema": {
- "required": [metaFieldName],
- "properties": {"measurement": {description: "can be any value"}}
- }
- },
- u: {$set: {[metaFieldName]: "a"}},
- multi: true
- }],
- resultDocList: [doc1, nestedMetaObj],
- n: 0,
- failCode: ErrorCodes.InvalidOptions
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1, nestedMetaObj],
+ updateList: [{
+ q: {
+ "$jsonSchema": {
+ "required": [metaFieldName],
+ "properties": {"measurement": {description: "can be any value"}}
+ }
+ },
+ u: {$set: {[metaFieldName]: "a"}},
+ multi: true
+ }],
+ resultDocList: [doc1, nestedMetaObj],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions
+ });
+ }
// Multiple updates, unordered: Modify the metaField of all documents using arrayFilters.
testUpdate({
@@ -1030,18 +1035,20 @@ TimeseriesTest.run((insert) => {
});
// Do the same test case as above but with upsert:true, which should fail.
- testUpdate({
- initialDocList: [doc1, doc4, doc5],
- updateList: [{
- q: {[metaFieldName]: "Z"},
- u: {$set: {[metaFieldName]: 5}},
- multi: true,
- upsert: true,
- }],
- resultDocList: [doc1, doc4, doc5],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc1, doc4, doc5],
+ updateList: [{
+ q: {[metaFieldName]: "Z"},
+ u: {$set: {[metaFieldName]: 5}},
+ multi: true,
+ upsert: true,
+ }],
+ resultDocList: [doc1, doc4, doc5],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
// Variables defined in the let option can only be used in the update if the update is an
// pipeline update. Since this update is an update document, the literal name of the variable
@@ -1058,94 +1065,98 @@ TimeseriesTest.run((insert) => {
n: 1,
});
- /************************** Tests updating with an update pipeline **************************/
- // Modify the metaField, which should fail since update pipelines are not supported.
- testUpdate({
- initialDocList: [doc1],
- updateList: [{
- q: {},
- u: [
- {$addFields: {[metaFieldName + ".c"]: "C", [metaFieldName + ".e"]: "E"}},
- {$unset: metaFieldName + ".e"}
- ],
- multi: true,
- }],
- resultDocList: [doc1],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ if (!arbitraryUpdatesEnabled) {
+ /************************** Tests updating with an update pipeline ************************/
+ // Modify the metaField, which should fail since update pipelines are not supported.
+ testUpdate({
+ initialDocList: [doc1],
+ updateList: [{
+ q: {},
+ u: [
+ {$addFields: {[metaFieldName + ".c"]: "C", [metaFieldName + ".e"]: "E"}},
+ {$unset: metaFieldName + ".e"}
+ ],
+ multi: true,
+ }],
+ resultDocList: [doc1],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- /************************ Tests updating with a replacement document *************************/
- // Replace a document to have no metaField, which should fail since updates with replacement
- // documents are not supported.
- testUpdate({
- initialDocList: [doc2],
- updateList: [{
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {f2: {e: "E", f: "F"}, f3: 7},
- multi: true,
- }],
- resultDocList: [doc2],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ /************************ Tests updating with a replacement document **********************/
+ // Replace a document to have no metaField, which should fail since updates with replacement
+ // documents are not supported.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [{
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {f2: {e: "E", f: "F"}, f3: 7},
+ multi: true,
+ }],
+ resultDocList: [doc2],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
- // Replace a document with an empty document, which should fail since updates with replacement
- // documents are not supported.
- testUpdate({
- initialDocList: [doc2],
- updateList: [{
- q: {[metaFieldName]: {c: "C", d: 2}},
- u: {},
- multi: true,
- }],
- resultDocList: [doc2],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- });
+ // Replace a document with an empty document, which should fail since updates with
+ // replacement documents are not supported.
+ testUpdate({
+ initialDocList: [doc2],
+ updateList: [{
+ q: {[metaFieldName]: {c: "C", d: 2}},
+ u: {},
+ multi: true,
+ }],
+ resultDocList: [doc2],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ });
+ }
/*********************** Tests updating a collection with no metaField. **********************/
// Query on a field which is not the (nonexistent) metaField.
- testUpdate({
- initialDocList: [doc3],
- updateList: [{
- q: {f: "F"},
- u: {},
- multi: true,
- }],
- resultDocList: [doc3],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- hasMetaField: false,
- });
+ if (!arbitraryUpdatesEnabled) {
+ testUpdate({
+ initialDocList: [doc3],
+ updateList: [{
+ q: {f: "F"},
+ u: {$set: {f: "FF"}},
+ multi: true,
+ }],
+ resultDocList: [doc3],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ hasMetaField: false,
+ });
- // Query on all documents and update them to be empty documents.
- testUpdate({
- initialDocList: [doc3],
- updateList: [{
- q: {},
- u: {},
- multi: true,
- }],
- resultDocList: [doc3],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- hasMetaField: false,
- });
+ // Query on all documents and update them to be empty documents.
+ testUpdate({
+ initialDocList: [doc3],
+ updateList: [{
+ q: {},
+ u: {$set: {f: "FF"}},
+ multi: true,
+ }],
+ resultDocList: [doc3],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ hasMetaField: false,
+ });
- // Query on all documents and update them to be nonempty documents.
- testUpdate({
- initialDocList: [doc3],
- updateList: [{
- q: {},
- u: {f: "FF"},
- multi: true,
- }],
- resultDocList: [doc3],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
- hasMetaField: false,
- });
+ // Query on all documents and update them to be nonempty documents.
+ testUpdate({
+ initialDocList: [doc3],
+ updateList: [{
+ q: {},
+ u: {$set: {f: "FF"}},
+ multi: true,
+ }],
+ resultDocList: [doc3],
+ n: 0,
+ failCode: ErrorCodes.InvalidOptions,
+ hasMetaField: false,
+ });
+ }
/************************ Tests updating a collection using collation. ************************/
const collationDoc1 = {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: "café"};
@@ -1187,4 +1198,3 @@ TimeseriesTest.run((insert) => {
n: 1,
});
});
-}());
diff --git a/jstests/core/timeseries/timeseries_update_concurrent.js b/jstests/core/timeseries/timeseries_update_concurrent.js
index c6a7b41aadf79..85cb4fa02c874 100644
--- a/jstests/core/timeseries/timeseries_update_concurrent.js
+++ b/jstests/core/timeseries/timeseries_update_concurrent.js
@@ -21,10 +21,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/libs/curop_helpers.js");
load("jstests/libs/parallel_shell_helpers.js");
@@ -118,10 +115,11 @@ validateUpdateIndex(
// Attempt to update a document in a collection that has been replaced with a new time-series
// collection with a different metaField.
-validateUpdateIndex(
- docs,
- [{q: {[metaFieldName]: {a: "B"}}, u: {$set: {[metaFieldName]: {c: "C"}}}, multi: true}],
- testCases.REPLACE_METAFIELD,
- ErrorCodes.InvalidOptions,
- "meta");
-})();
+if (!TimeseriesTest.arbitraryUpdatesEnabled(db)) {
+ validateUpdateIndex(
+ docs,
+ [{q: {[metaFieldName]: {a: "B"}}, u: {$set: {[metaFieldName]: {c: "C"}}}, multi: true}],
+ testCases.REPLACE_METAFIELD,
+ ErrorCodes.InvalidOptions,
+ "meta");
+}
diff --git a/jstests/core/timeseries/timeseries_update_hint.js b/jstests/core/timeseries/timeseries_update_hint.js
index 98ecca8e872e9..65014827a3e12 100644
--- a/jstests/core/timeseries/timeseries_update_hint.js
+++ b/jstests/core/timeseries/timeseries_update_hint.js
@@ -19,10 +19,6 @@
* uses_parallel_shell,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
load("jstests/libs/curop_helpers.js");
load('jstests/libs/parallel_shell_helpers.js');
@@ -355,5 +351,4 @@ testUpdateHintFailed({
resultDocList: [hintDoc1, hintDoc2, hintDoc3],
nModifiedBuckets: 0,
failCode: ErrorCodes.BadValue,
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/timeseries/timeseries_update_multi.js b/jstests/core/timeseries/timeseries_update_multi.js
index 5f1bc9ae0de1e..1cd26c3746236 100644
--- a/jstests/core/timeseries/timeseries_update_multi.js
+++ b/jstests/core/timeseries/timeseries_update_multi.js
@@ -6,9 +6,9 @@
* requires_multi_updates,
* # We need a timeseries collection.
* requires_timeseries,
+ * requires_non_retryable_writes,
* featureFlagTimeseriesUpdatesSupport,
- * # TODO (SERVER-73454): Re-enable the tests.
- * __TEMPORARILY_DISABLED__,
+ * featureFlagLargeBatchedOperations,
* ]
*/
@@ -27,14 +27,20 @@ assert.commandWorked(testDB.dropDatabase());
/**
* Confirms that a set of updates returns the expected set of documents.
+ *
+ * If this is an upsert and we expect a document to be inserted, 'upsertedDoc' must be non-null.
+ * The upserted _id returned by the update command is combined with 'upsertedDoc' to construct the
+ * inserted document, which is then added to 'resultDocList' when validating the collection's
+ * contents.
*/
function testUpdate({
initialDocList,
createCollectionWithMetaField = true,
updateList,
resultDocList,
- n,
- nModified = n,
+ nMatched,
+ nModified = nMatched,
+ upsertedDoc,
failCode,
}) {
const coll = testDB.getCollection(collNamePrefix + count++);
@@ -47,303 +53,867 @@ function testUpdate({
assert.commandWorked(coll.insert(initialDocList));
const updateCommand = {update: coll.getName(), updates: updateList};
- const res = failCode ? assert.commandFailedWithCode(testDB.runCommand(updateCommand), failCode)
- : assert.commandWorked(testDB.runCommand(updateCommand));
+ const res = failCode ? assert.commandFailedWithCode(coll.runCommand(updateCommand), failCode)
+ : assert.commandWorked(coll.runCommand(updateCommand));
+
+ if (!failCode) {
+ if (upsertedDoc) {
+ assert.eq(1, res.n, tojson(res));
+ assert.eq(0, res.nModified, tojson(res));
+ assert(res.hasOwnProperty("upserted"), tojson(res));
+ assert.eq(1, res.upserted.length);
+
+ if (upsertedDoc.hasOwnProperty("_id")) {
+ assert.eq(upsertedDoc._id, res.upserted[0]._id);
+ } else {
+ upsertedDoc["_id"] = res.upserted[0]._id;
+ }
+ resultDocList.push(upsertedDoc);
+ } else {
+ assert.eq(nMatched, res.n);
+ assert.eq(nModified, res.nModified);
+ assert(!res.hasOwnProperty("upserted"), tojson(res));
+ }
+ }
- assert.eq(n, res.n);
- assert.eq(nModified, res.nModified);
const resDocs = coll.find().toArray();
assert.eq(resDocs.length, resultDocList.length);
- resultDocList.forEach(resultDoc => {
- assert.docEq(resultDoc,
- coll.findOne({_id: resultDoc._id}),
- "Expected document " + resultDoc["_id"] +
- " not found in result collection:" + tojson(resDocs));
- });
+ assert.sameMembers(
+ resultDocList, resDocs, "Collection contents did not match expected after update");
}
-const doc_a_b_no_metrics = {
+const doc_id_1_a_b_no_metrics = {
_id: 1,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", b: "B"},
};
-const doc_a_b_array_metric = {
+const doc_id_2_a_b_array_metric = {
_id: 2,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", b: "B"},
f: [{"k": "K", "v": "V"}],
};
-const doc_a_b_string_metric = {
+const doc_id_3_a_b_string_metric = {
_id: 3,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", b: "B"},
f: "F",
};
-const doc_no_meta_string_metric = {
+const doc_id_4_no_meta_string_metric = {
_id: 4,
[timeFieldName]: dateTime,
f: "F",
};
-const doc_a_c_array_metric_1 = {
+const doc_id_5_a_c_array_metric = {
_id: 5,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", c: "C"},
f: [2, 3],
};
-const doc_a_c_array_metric_2 = {
+const doc_id_6_a_c_array_metric = {
_id: 6,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", c: "C"},
f: [1, 10],
};
-const doc_no_meta_int_metric = {
+const doc_id_7_no_meta_int_metric = {
_id: 7,
[timeFieldName]: dateTime,
g: 1,
};
+const doc_id_8_array_meta = {
+ _id: 8,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: [1, 2, 3, 4],
+ f: [4, 3, 2, 1],
+};
/**
* Tests op-style updates
*/
// Query on the _id field and modify the metaField.
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric],
- updateList: [{
- q: {_id: {$lt: 10}},
- u: {$set: {[metaFieldName]: {c: "C"}}},
- multi: true,
- }],
- resultDocList: [
- {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C"}},
- {
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {c: "C"},
- f: [{"k": "K", "v": "V"}],
- },
- ],
- n: 2,
-});
+(function testMetricFieldQueryMetaFieldUpdate() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {_id: {$lt: 10}},
+ u: {$set: {[metaFieldName]: {c: "C"}}},
+ multi: true,
+ }],
+ resultDocList: [
+ {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C"}},
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C"},
+ f: [{"k": "K", "v": "V"}],
+ },
+ ],
+ nMatched: 2,
+ });
+})();
+
+// Query doesn't match any docs.
+(function testZeroMeasurementUpdate() {
+ testUpdate({
+ initialDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {someField: "doesNotExist"},
+ u: {$set: {[metaFieldName]: {c: "C"}}},
+ multi: true,
+ }],
+ resultDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric],
+ nMatched: 0,
+ });
+})();
+
+// No-op update.
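+// The $set writes the meta value the documents already have, so both documents match but neither
+// is modified.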
+(function testNoopUpdate() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {},
+ u: {$set: {[metaFieldName]: {a: "A", b: "B"}}},
+ multi: true,
+ }],
+ resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ nMatched: 2,
+ nModified: 0
+ });
+})();
// Query on the metaField and modify the timeField.
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric],
- updateList: [{
- q: {[metaFieldName]: {a: "A", b: "B"}},
- u: {$set: {[timeFieldName]: dateTimeUpdated}},
- multi: true,
- }],
- resultDocList: [
- {
- _id: 1,
- [timeFieldName]: dateTimeUpdated,
- [metaFieldName]: {a: "A", b: "B"},
- },
- {
- _id: 2,
- [timeFieldName]: dateTimeUpdated,
- [metaFieldName]: {a: "A", b: "B"},
- f: [{"k": "K", "v": "V"}],
- },
- ],
- n: 2,
-});
+// Skip tests that change the shard key value when running against a sharded cluster.
+if (!db.getMongo().isMongos()) {
+ (function testMetaFieldQueryTimeFieldUpdate() {
+ testUpdate({
+ initialDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: {$set: {[timeFieldName]: dateTimeUpdated}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTimeUpdated,
+ [metaFieldName]: {a: "A", b: "B"},
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTimeUpdated,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: [{"k": "K", "v": "V"}],
+ },
+ doc_id_5_a_c_array_metric
+ ],
+ nMatched: 2,
+ });
+ })();
+}
+
+// Query on the metaField and a metric field.
+(function testMetaFieldQueryMetricFieldMetric() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}, f: "F"},
+ u: {$set: {[metaFieldName]: {c: "C"}}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C"},
+ f: "F",
+ },
+ doc_id_2_a_b_array_metric
+ ],
+ nMatched: 1,
+ });
+})();
+
+// Query on the metaField and modify the metaField and a metric field.
+(function testMetaFieldQueryMetaAndMetricFieldUpdate() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: {$set: {[metaFieldName]: {c: "C"}, f: "FF"}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C"},
+ f: "FF",
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {c: "C"},
+ f: "FF",
+ }
+ ],
+ nMatched: 2,
+ });
+})();
-// Query on the metric field and remove the timeField.
// This command will fail because all time-series collections require a time field.
-testUpdate({
- initialDocList: [doc_a_b_string_metric, doc_a_c_array_metric_1],
- updateList: [{
- q: {f: "F"},
- u: {$unset: {[timeFieldName]: ""}},
- multi: true,
- }],
- resultDocList: [
- doc_a_b_string_metric,
- doc_a_c_array_metric_1,
- ],
- n: 0,
- failCode: ErrorCodes.InvalidOptions,
-});
+// Skip tests that change the shard key value when running against a sharded cluster.
+if (!db.getMongo().isMongos()) {
+ (function testRemoveTimeField() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {f: "F"},
+ u: {$unset: {[timeFieldName]: ""}},
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_3_a_b_string_metric,
+ doc_id_5_a_c_array_metric,
+ ],
+ nMatched: 0,
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+}
+
+// This command will fail because the time field must be a timestamp.
+// Skip tests that change the shard key value when running against a sharded cluster.
+if (!db.getMongo().isMongos()) {
+ (function testChangeTimeFieldType() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {f: "F"},
+ u: {$set: {[timeFieldName]: "hello"}},
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_3_a_b_string_metric,
+ doc_id_5_a_c_array_metric,
+ ],
+ nMatched: 0,
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+}
// Query on the time field and remove the metaField.
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric, doc_a_c_array_metric_1],
- updateList: [{
- q: {[timeField]: dateTime},
- u: {$unset: {[metaFieldName]: ""}, multi: true},
- multi: true,
- }],
- resultDocList: [
- {
- _id: 1,
- [timeFieldName]: dateTime,
- },
- {
- _id: 2,
- [timeFieldName]: dateTime,
- f: [{"k": "K", "v": "V"}],
- },
- {
- _id: 5,
- [timeFieldName]: dateTime,
- f: [2, 3],
- },
- ],
- n: 3,
-});
+(function testTimeFieldQueryRemoveMetaField() {
+ testUpdate({
+ initialDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {[timeFieldName]: dateTime},
+ u: {$unset: {[metaFieldName]: ""}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ f: [{"k": "K", "v": "V"}],
+ },
+ {
+ _id: 5,
+ [timeFieldName]: dateTime,
+ f: [2, 3],
+ },
+ ],
+ nMatched: 3,
+ });
+})();
+
+(function testRenameMetaField() {
+ // Rename the metaField.
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {},
+ u: {$rename: {[metaFieldName]: "Z"}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ Z: {a: "A", b: "B"},
+ f: "F",
+ },
+ ],
+ nMatched: 1,
+ });
+})();
+
+// Rename a subfield of the metaField to something not in the metaField.
+(function testRenameMetaSubfield() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {[metaFieldName + ".a"]: "A"},
+ u: {$rename: {[metaFieldName + ".a"]: "Z.a"}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {b: "B"},
+ Z: {a: "A"},
+ f: "F",
+ },
+ ],
+ nMatched: 1,
+ });
+})();
// Expand a metric field.
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric],
- updateList: [{
- q: {[metaFieldName]: {a: "A", b: "B"}},
- u: {$set: {f: "x".repeat(5 * 1024 * 1024)}},
- multi: true,
- }],
- resultDocList: [
- {
- _id: 1,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", b: "B"},
- f: "x".repeat(5 * 1024 * 1024),
- },
- {
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", b: "B"},
- f: "x".repeat(5 * 1024 * 1024),
- },
- ],
- n: 2,
-});
+(function testExpandMetricField() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: {$set: {f: "x".repeat(3 * 1024 * 1024)}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: "x".repeat(3 * 1024 * 1024),
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: "x".repeat(3 * 1024 * 1024),
+ },
+ ],
+ nMatched: 2,
+ });
+})();
// Change the type of an existing field.
-testUpdate({
- initialDocList: [doc_a_b_array_metric, doc_a_b_string_metric],
- updateList: [{
- q: {[metaFieldName]: {a: "A", b: "B"}},
- u: {$set: {f: "X"}},
- multi: true,
- }],
- resultDocList: [
- {
- _id: 2,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", b: "B"},
- f: "X",
- },
- {
- _id: 3,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", b: "B"},
- f: "X",
- },
- ],
- n: 2,
-});
+(function testChangeExistingFieldType() {
+ testUpdate({
+ initialDocList: [doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: {$set: {f: "X"}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: "X",
+ },
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: "X",
+ },
+ ],
+ nMatched: 2,
+ });
+})();
// Add a new field.
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric, doc_a_b_string_metric],
- updateList: [{
- q: {_id: {$lt: 3}},
- u: {$set: {g: 42}},
- multi: true,
- }],
- resultDocList: [
- {
- _id: 1,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", b: "B"},
- g: 42,
- },
- {
- _id: 2,
+(function testAddNewField() {
+ testUpdate({
+ initialDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {_id: {$lt: 3}},
+ u: {$set: {g: 42}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ g: 42,
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: [{"k": "K", "v": "V"}],
+ g: 42,
+ },
+ doc_id_3_a_b_string_metric
+ ],
+ nMatched: 2,
+ });
+})();
+
+// Update a metric field with a positional operator.
+(function testArrayModifier() {
+ testUpdate({
+ initialDocList:
+ [doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric],
+ updateList: [{
+ q: {f: {$gt: 2}},
+ u: {$set: {'f.$': 20}},
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_2_a_b_array_metric,
+ {
+ _id: 5,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [2, 20],
+ },
+ {
+ _id: 6,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [1, 20],
+ }
+ ],
+ nMatched: 2,
+ });
+})();
+
+// Update the meta field with a positional operator.
+(function testMetaFieldArrayModifier() {
+ testUpdate({
+ initialDocList: [doc_id_8_array_meta, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {$gt: 2}},
+ u: {$set: {[metaFieldName + '.$']: 20, f: 10}},
+ multi: true,
+ }],
+ resultDocList: [
+ {_id: 8, [timeFieldName]: dateTime, [metaFieldName]: [1, 2, 20, 4], f: 10},
+ doc_id_2_a_b_array_metric
+ ],
+ nMatched: 1,
+ });
+})();
+
+// Update meta and metric fields with a positional operator.
+(function testMetaAndMetricFieldArrayModifier() {
+ testUpdate({
+ initialDocList: [doc_id_8_array_meta, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {$gt: 2}, f: {$gt: 2}},
+ u: {$set: {[metaFieldName + '.$']: 20, 'f.$': 10}},
+ multi: true,
+ }],
+ resultDocList: [
+ {_id: 8, [timeFieldName]: dateTime, [metaFieldName]: [20, 2, 3, 4], f: [10, 3, 2, 1]},
+ doc_id_2_a_b_array_metric
+ ],
+ nMatched: 1,
+ });
+})();
+
+// Empty query and update a metric field using a positional operator.
+(function testArrayModifierNoFilter() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {},
+ u: {$set: {'f.$': 20}},
+ multi: true,
+ }],
+ resultDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ failCode: ErrorCodes.BadValue,
+ });
+})();
+
+// Query on the meta field and update a metric field using a positional operator.
+(function testArrayModifierMetaFilter() {
+ testUpdate({
+ initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", c: "C"}},
+ u: {$set: {'f.$': 20}},
+ multi: true,
+ }],
+ resultDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric],
+ failCode: ErrorCodes.BadValue,
+ });
+})();
+
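+// Replace matching array elements (f <= 2) with a subdocument, changing the element's type.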
+(function testChangeArrayElementType() {
+ testUpdate({
+ initialDocList:
+ [doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric],
+ updateList: [{
+ q: {f: {$lte: 2}},
+ u: {$set: {'f.$': {k: "v"}}},
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_2_a_b_array_metric,
+ {
+ _id: 5,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [{k: "v"}, 3],
+ },
+ {
+ _id: 6,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [{k: "v"}, 10],
+ }
+ ],
+ nMatched: 2,
+ });
+})();
+
+(function testChangeMeasurementId() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {},
+ u: {$set: {_id: 10}},
+ multi: true,
+ }],
+ resultDocList: [{
+ _id: 10,
[timeFieldName]: dateTime,
[metaFieldName]: {a: "A", b: "B"},
- f: [{"k": "K", "v": "V"}],
- g: 42,
- },
- doc_a_b_string_metric
- ],
- n: 2,
-});
+ }],
+ nMatched: 1
+ });
+})();
+
+// Use a non-idempotent update to insert the updated measurement later in the index to verify
+// handling of the Halloween Problem.
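+// (The Halloween Problem: an update that moves a document ahead of the current scan position can
+// cause the same document to be matched and updated more than once.)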
+(function testHalloweenProblem() {
+ testUpdate({
+ initialDocList: [doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {},
+ u: {$set: {[metaFieldName + '.a']: "B"}, $inc: {x: 1}},
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "B", b: "B"},
+ f: [{"k": "K", "v": "V"}],
+ x: 1,
+ },
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "B", b: "B"},
+ f: "F",
+ x: 1,
+ },
+ ],
+ nMatched: 2,
+ });
+})();
/**
* Tests pipeline-style updates
*/
// Add a field of the sum of an array field using aggregation pipeline.
-testUpdate({
- initialDocList: [doc_a_c_array_metric_1, doc_a_c_array_metric_2],
- updateList: [{
- q: {[metaFieldName]: {a: "A", c: "C"}},
- u: [{$set: {sumF: {$sum: "$f"}}}],
- multi: true,
- }],
- resultDocList: [
- {
- _id: 5,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", c: "C"},
- f: [2, 3],
- sumF: 5,
- },
- {
- _id: 6,
- [timeFieldName]: dateTime,
- [metaFieldName]: {a: "A", c: "C"},
- f: [5, 6],
- sumF: 11,
- },
- ],
- n: 2,
-});
+(function testUpdatePipelineArrayAggregation() {
+ testUpdate({
+ initialDocList: [doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", c: "C"}},
+ u: [{$set: {sumF: {$sum: "$f"}}}],
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 5,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [2, 3],
+ sumF: 5,
+ },
+ {
+ _id: 6,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", c: "C"},
+ f: [1, 10],
+ sumF: 11,
+ },
+ ],
+ nMatched: 2,
+ });
+})();
// Add a new field for all measurements.
-testUpdate({
- initialDocList: [doc_no_meta_string_metric, doc_no_meta_int_metric],
- createCollectionWithMetaField: false,
- updateList: [{
- q: {},
- u: [{$set: {newField: true}}],
- multi: true,
- }],
- resultDocList: [
- {
- _id: 4,
- [timeFieldName]: dateTime,
- f: "F",
- newField: true,
- },
- {
- _id: 7,
- [timeFieldName]: dateTime,
- g: 1,
- newField: true,
- },
- ],
- n: 2,
-});
+(function testUpdatePipelineAddNewField() {
+ testUpdate({
+ initialDocList: [doc_id_4_no_meta_string_metric, doc_id_7_no_meta_int_metric],
+ createCollectionWithMetaField: false,
+ updateList: [{
+ q: {},
+ u: [{$set: {newField: true}}],
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 4,
+ [timeFieldName]: dateTime,
+ f: "F",
+ newField: true,
+ },
+ {
+ _id: 7,
+ [timeFieldName]: dateTime,
+ g: 1,
+ newField: true,
+ },
+ ],
+ nMatched: 2,
+ });
+})();
+
+// Cause a bucket to be split into multiple new buckets by an update, i.e. update documents in the
+// same bucket so that they belong in different buckets.
+(function testSplitBucketWithUpdate() {
+ testUpdate({
+ initialDocList:
+ [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric],
+ updateList: [{
+ q: {},
+ u: [{$set: {[metaFieldName]: "$f"}}],
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: [{"k": "K", "v": "V"}],
+ f: [{"k": "K", "v": "V"}],
+ },
+ {
+ _id: 3,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: "F",
+ f: "F",
+ }
+ ],
+ nMatched: 3,
+ });
+})();
+
+// Only touch the meta field in a pipeline update.
+(function testUpdatePipelineOnlyTouchMetaField() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_6_a_c_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}},
+ u: [{$set: {[metaFieldName]: "$" + metaFieldName + ".a"}}],
+ multi: true,
+ }],
+ resultDocList:
+ [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: "A"}, doc_id_6_a_c_array_metric],
+ nMatched: 1,
+ });
+})();
/**
* Tests upsert with multi:true.
*/
-testUpdate({
- initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric],
- updateList: [{
- q: {[metaFieldName]: {z: "Z"}},
- u: {$set: {[timeFieldName]: dateTime}},
- upsert: true,
- multi: true,
- }],
- resultDocList: [
- doc_a_b_no_metrics,
- doc_a_b_array_metric,
- {[timeFieldName]: dateTime},
- ],
- n: 1,
- nModified: 0,
-});
+// Skip upsert tests in sharding as the query has to be on the shard key field.
+if (!db.getMongo().isMongos()) {
+    // Run an upsert that doesn't include an _id.
+    (function testUpsertWithNoId() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {z: "Z"}},
+ u: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ doc_id_2_a_b_array_metric,
+ ],
+ upsertedDoc: {[metaFieldName]: {z: "Z"}, [timeFieldName]: dateTime},
+ });
+ })();
+ // Run an upsert that includes an _id.
+ (function testUpsertWithId() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {_id: 100},
+ u: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ upsertedDoc: {_id: 100, [timeFieldName]: dateTime},
+ });
+ })();
+
+ // Run an upsert that updates documents and skips the upsert.
+ (function testUpsertUpdatesDocs() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName + ".a"]: "A"},
+ u: {$set: {f: 10}},
+ upsert: true,
+ multi: true,
+ }],
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: 10,
+ },
+ {
+ _id: 2,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: 10,
+ }
+ ],
+ nMatched: 2,
+ });
+ })();
+
+    // Run an upsert that matches documents, applies no-op updates to them, and skips the upsert.
+ (function testUpsertMatchesDocs() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName + ".a"]: "A"},
+ u: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ multi: true,
+ }],
+ resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ nMatched: 2,
+ nModified: 0,
+ });
+ })();
+
+ // Run an upsert that matches a bucket but no documents in it, and inserts the document into a
+ // bucket with the same parameters.
+ (function testUpsertIntoMatchedBucket() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ updateList: [{
+ q: {[metaFieldName]: {a: "A", b: "B"}, f: 111},
+ u: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ multi: true,
+ }],
+ upsertedDoc: {[metaFieldName]: {a: "A", b: "B"}, [timeFieldName]: dateTime, f: 111},
+ resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric],
+ });
+ })();
+
+ // Run an upsert that doesn't insert a time field.
+ (function testUpsertNoTimeField() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {[metaFieldName]: {z: "Z"}},
+ u: {$set: {f: 10}},
+ upsert: true,
+ multi: true,
+ }],
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+
+ // Run an upsert where the time field is provided in the query.
+ (function testUpsertQueryOnTimeField() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {[timeFieldName]: dateTimeUpdated},
+ u: {$set: {f: 10}},
+ upsert: true,
+ multi: true,
+ }],
+ upsertedDoc: {
+ [timeFieldName]: dateTimeUpdated,
+ f: 10,
+ },
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert where a document to insert is supplied by the request.
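+    // 'upsertSupplied: true' tells the server to insert the document supplied in the constants
+    // field ('c.new') rather than one generated from the query and update.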
+ (function testUpsertSupplyDoc() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {[timeFieldName]: dateTimeUpdated},
+ u: [{$set: {f: 10}}],
+ upsert: true,
+ multi: true,
+ upsertSupplied: true,
+ c: {new: {[timeFieldName]: dateTime, f: 100}}
+ }],
+ upsertedDoc: {
+ [timeFieldName]: dateTime,
+ f: 100,
+ },
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert where a document to insert is supplied by the request and does not have a time
+ // field.
+ (function testUpsertSupplyDocNoTimeField() {
+ testUpdate({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateList: [{
+ q: {[timeFieldName]: dateTimeUpdated},
+ u: [{$set: {f: 10}}],
+ upsert: true,
+ multi: true,
+ upsertSupplied: true,
+ c: {new: {[metaFieldName]: {a: "A"}, f: 100}}
+ }],
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+}
})();
diff --git a/jstests/core/timeseries/timeseries_update_one.js b/jstests/core/timeseries/timeseries_update_one.js
index b53f645c5a038..fa524a3910e7b 100644
--- a/jstests/core/timeseries/timeseries_update_one.js
+++ b/jstests/core/timeseries/timeseries_update_one.js
@@ -5,51 +5,19 @@
* # We need a timeseries collection.
* requires_timeseries,
* featureFlagTimeseriesUpdatesSupport,
- * # TODO (SERVER-73726): Re-enable the time-series updateOne test.
- * __TEMPORARILY_DISABLED__,
+ * # TODO SERVER-76454 Remove the following two tags.
+ * does_not_support_retryable_writes,
+ * requires_non_retryable_writes,
* ]
*/
-(function() {
-"use strict";
-
-const timeFieldName = "time";
-const metaFieldName = "mm";
-const collNamePrefix = "timeseries_update_one_";
-let count = 0;
-const testDB = db.getSiblingDB(jsTestName());
-assert.commandWorked(testDB.dropDatabase());
-
-/**
- * Ensure the updateOne command operates correctly by examining documents after the update.
- */
-function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n, upsert = false}) {
- const coll = testDB.getCollection(collNamePrefix + count++);
- if (initialDocList) {
- assert.commandWorked(testDB.createCollection(
- coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
- assert.commandWorked(coll.insert(initialDocList));
- }
-
- const updateCommand = {
- update: coll.getName(),
- updates: [{q: updateQuery, u: updateObj, multi: false, upsert: upsert}]
- };
- const res = assert.commandWorked(testDB.runCommand(updateCommand));
- assert.eq(n, res.n);
- assert.eq((upsert) ? n - 1 : n, res.nModified);
-
- if (resultDocList) {
- const resDocs = coll.find().toArray();
- assert.eq(resDocs.length, resultDocList.length);
- resultDocList.forEach(resultDoc => {
- assert.docEq(resultDoc,
- coll.findOne({_id: resultDoc._id}),
- "Expected document " + resultDoc["_id"] +
- " not found in result collection:" + tojson(resDocs));
- });
- }
-}
+import {
+ getTestDB,
+ metaFieldName,
+ prepareCollection,
+ testUpdateOne,
+ timeFieldName
+} from "jstests/core/timeseries/libs/timeseries_writes_util.js";
/**
* Tests op-style updates.
@@ -82,85 +50,138 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n
const query_m1_b1 = {b: {$eq: 1}, [metaFieldName]: {$eq: 1}};
// Metric field update: unset field.
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: query_m1_a1,
- updateObj: {$unset: {a: ""}},
- resultDocList: [doc_m1_b, doc_m1_c_d],
- n: 1
- });
+ (function testUnsetMetricField() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: query_m1_a1,
+ updateObj: {$unset: {a: ""}},
+ resultDocList: [doc_m1_b, doc_m1_c_d],
+ nMatched: 1
+ });
+ })();
// Metric field update: add new field.
- testUpdateOne({
- initialDocList: [doc_m1_b, doc_m1_c_d],
- updateQuery: query_m1_b1,
- updateObj: {$set: {a: 1}},
- resultDocList: [doc_m1_a_b, doc_m1_c_d],
- n: 1
- });
+ (function testAddNewMetricField() {
+ testUpdateOne({
+ initialDocList: [doc_m1_b, doc_m1_c_d],
+ updateQuery: query_m1_b1,
+ updateObj: {$set: {a: 1}},
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ nMatched: 1
+ });
+ })();
// Metric field update: change field type (integer to array).
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: query_m1_a1,
- updateObj: {$set: {a: ["arr", "ay"]}},
- resultDocList: [doc_m1_arrayA_b, doc_m1_c_d],
- n: 1
- });
+ (function testChangeFieldType() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: query_m1_a1,
+ updateObj: {$set: {a: ["arr", "ay"]}},
+ resultDocList: [doc_m1_arrayA_b, doc_m1_c_d],
+ nMatched: 1
+ });
+ })();
// Metric field update: no-op with non-existent field to unset.
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: query_m1_a1,
- updateObj: {$unset: {z: ""}},
- resultDocList: [doc_m1_a_b, doc_m1_c_d],
- n: 0
- });
+ (function testMatchOneNoopUpdate() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: query_m1_a1,
+ updateObj: {$unset: {z: ""}},
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ nMatched: 1,
+ nModified: 0
+ });
+ })();
// Metric field update: no-op with non-existent field to unset.
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: {},
- updateObj: {$unset: {z: ""}},
- resultDocList: [doc_m1_a_b, doc_m1_c_d],
- n: 0
- });
+ (function testMatchMultipleNoopUpdate() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: {},
+ updateObj: {$unset: {z: ""}},
+ resultDocList: [doc_m1_a_b, doc_m1_c_d],
+ nMatched: 1,
+ nModified: 0
+ });
+ })();
+
+ // Metric field update: match multiple docs, only update one.
+ (function testMatchMultipleUpdateOne() {
+ testUpdateOne({
+ initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d],
+ updateQuery: {},
+ updateObj: {$set: {a: 100}},
+ // Don't validate exact results as we could update any doc.
+ nMatched: 1,
+ });
+ })();
+
+ // Match and update zero docs.
+ (function testMatchNone() {
+ testUpdateOne({
+ initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d],
+ updateQuery: {[metaFieldName]: {z: "Z"}},
+ updateObj: {$set: {a: 100}},
+ resultDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d],
+ nMatched: 0,
+ });
+ })();
+
+    // Meta-only update updates only one document.
+ (function testMetaOnlyUpdateOne() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: {[metaFieldName]: 1},
+ updateObj: {$set: {[metaFieldName]: 2}},
+ // Don't validate exact results as we could update any doc.
+ nMatched: 1,
+ });
+ })();
// Meta field update: remove meta field.
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: query_m1_a1,
- updateObj: {$unset: {[metaFieldName]: ""}},
- resultDocList: [doc_a_b, doc_m1_c_d],
- n: 1
- });
+ (function testRemoveMetaField() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: query_m1_a1,
+ updateObj: {$unset: {[metaFieldName]: ""}},
+ resultDocList: [doc_a_b, doc_m1_c_d],
+ nMatched: 1
+ });
+ })();
// Meta field update: add meta field.
- testUpdateOne({
- initialDocList: [doc_a_b],
- updateQuery: {},
- updateObj: {$set: {[metaFieldName]: 1}},
- resultDocList: [doc_m1_a_b],
- n: 1
- });
+ (function testAddMetaField() {
+ testUpdateOne({
+ initialDocList: [doc_a_b],
+ updateQuery: {},
+ updateObj: {$set: {[metaFieldName]: 1}},
+ resultDocList: [doc_m1_a_b],
+ nMatched: 1
+ });
+ })();
- // Meta field update: add meta field.
- testUpdateOne({
- initialDocList: [doc_m1_b],
- updateQuery: {},
- updateObj: {$set: {[metaFieldName]: 2}},
- resultDocList: [doc_m2_b],
- n: 1
- });
+ // Meta field update: update meta field.
+ (function testUpdateMetaField() {
+ testUpdateOne({
+ initialDocList: [doc_m1_b],
+ updateQuery: {},
+ updateObj: {$set: {[metaFieldName]: 2}},
+ resultDocList: [doc_m2_b],
+ nMatched: 1
+ });
+ })();
// Meta field update: update meta field to different type (integer to string).
- testUpdateOne({
- initialDocList: [doc_m1_a_b, doc_m1_c_d],
- updateQuery: query_m1_a1,
- updateObj: {$set: {[metaFieldName]: "1"}},
- resultDocList: [doc_stringM1_a_b, doc_m1_c_d],
- n: 1
- });
+ (function testChangeMetaFieldType() {
+ testUpdateOne({
+ initialDocList: [doc_m1_a_b, doc_m1_c_d],
+ updateQuery: query_m1_a1,
+ updateObj: {$set: {[metaFieldName]: "1"}},
+ resultDocList: [doc_stringM1_a_b, doc_m1_c_d],
+ nMatched: 1
+ });
+ })();
}
/**
@@ -174,22 +195,44 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n
{[timeFieldName]: timestamp2022, [metaFieldName]: 2, _id: 1, a: 1, "newField": 42};
// Update timeField, metaField and add a new field.
- testUpdateOne({
- initialDocList: [doc_2023_m1_a1],
- updateQuery: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}},
- updateObj: [
- {$set: {[timeFieldName]: timestamp2022}},
- {$set: {[metaFieldName]: 2}},
- {$set: {"newField": 42}},
- ],
- resultDocList: [doc_2022_m2_a1_newField],
- n: 1
- });
+ // Skip tests changing the shard key value in sharding.
+ if (!db.getMongo().isMongos()) {
+ (function testPipelineUpdateSetMultipleFields() {
+ testUpdateOne({
+ initialDocList: [doc_2023_m1_a1],
+ updateQuery: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}},
+ updateObj: [
+ {$set: {[timeFieldName]: timestamp2022}},
+ {$set: {[metaFieldName]: 2}},
+ {$set: {"newField": 42}},
+ ],
+ resultDocList: [doc_2022_m2_a1_newField],
+ nMatched: 1
+ });
+ })();
+ }
// Expect removal of the timeField to fail.
- assert.commandFailedWithCode(
- regColl.updateOne({}, [{$set: {[metaFieldName]: 2}}, {$unset: {[timeFieldName]: ""}}]),
- ErrorCodes.InvalidOptions);
+ (function testRemoveTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_2023_m1_a1],
+ updateQuery: {},
+ updateObj: [{$set: {[metaFieldName]: 2}}, {$unset: timeFieldName}],
+ resultDocList: [doc_2023_m1_a1],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+
+ // Expect changing the type of the timeField to fail.
+ (function testChangeTimeFieldType() {
+ testUpdateOne({
+ initialDocList: [doc_2023_m1_a1],
+ updateQuery: {},
+ updateObj: [{$set: {[timeFieldName]: "string"}}],
+ resultDocList: [doc_2023_m1_a1],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
}
/**
@@ -198,49 +241,264 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n
{
const timestamp2023 = ISODate("2023-02-06T19:19:00Z");
const timestamp2022 = ISODate("2022-02-06T19:19:00Z");
- const doc_t2023_m1_id_a = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1};
- const doc_t2022_m2_stringId_stringA =
- {[timeFieldName]: timestamp2022, [metaFieldName]: 2, "_id": 2, "a": 2};
-
- // Full measurement replacement: update every field in the document.
- testUpdateOne({
- initialDocList: [doc_t2023_m1_id_a],
- updateQuery: {},
- updateObj: doc_t2022_m2_stringId_stringA,
- resultDocList: [doc_t2022_m2_stringId_stringA],
- n: 1
- });
+ const doc_t2023_m1_id1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1};
+ const doc_t2022_m2_id2_a2 = {[timeFieldName]: timestamp2022, [metaFieldName]: 2, _id: 2, a: 2};
+ const doc_t2022_m2_noId_a2 = {[timeFieldName]: timestamp2022, [metaFieldName]: 2, a: 2};
+
+ // Skip tests changing the shard key value in sharding.
+ if (!db.getMongo().isMongos()) {
+ // Full measurement replacement: update every field in the document, including the _id.
+ (function testReplacementUpdateChangeId() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ updateQuery: {},
+ updateObj: doc_t2022_m2_id2_a2,
+ resultDocList: [doc_t2022_m2_id2_a2],
+ nMatched: 1
+ });
+ })();
+
+ // Full measurement replacement: update every field in the document, except the _id.
+ (function testReplacementUpdateNoId() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ updateQuery: {_id: 1},
+ updateObj: doc_t2022_m2_noId_a2,
+ resultDocList: [
+ doc_t2022_m2_id2_a2,
+ {[timeFieldName]: timestamp2022, [metaFieldName]: 2, a: 2, _id: 1},
+ ],
+ nMatched: 1
+ });
+ })();
+
+ // Replacement that results in two duplicate measurements.
+ (function testReplacementUpdateDuplicateIds() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ updateQuery: {_id: 1},
+ updateObj: doc_t2022_m2_id2_a2,
+ resultDocList: [doc_t2022_m2_id2_a2, doc_t2022_m2_id2_a2],
+ nMatched: 1,
+ });
+ })();
+ }
+
+ // Replacement with no time field.
+ (function testReplacementUpdateNoTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ updateQuery: {_id: 1},
+ updateObj: {[metaFieldName]: 1, a: 1, _id: 10},
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+
+ // Replacement with time field of the wrong type.
+ (function testReplacementUpdateWrongTypeTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ updateQuery: {_id: 1},
+ updateObj: {[metaFieldName]: 1, a: 1, _id: 10, [timeFieldName]: "string"},
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+
+ // Replacement that only references the meta field. Still fails because of the missing time
+ // field.
+ (function testReplacementMetaOnly() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ updateQuery: {[metaFieldName]: 1},
+ updateObj: {[metaFieldName]: 3},
+ resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
// Tests upsert with full measurement.
- testUpdateOne({
- initialDocList: [doc_t2023_m1_id_a],
- updateQuery: {[metaFieldName]: {$eq: 2}},
- updateObj: doc_t2022_m2_stringId_stringA,
- resultDocList: [doc_t2023_m1_id_a, doc_t2022_m2_stringId_stringA],
- n: 1,
- upsert: true
- });
-
- // Tests upsert with full measurement: no-op when the query doesn't match and upsert is false.
- testUpdateOne({
- initialDocList: [doc_t2023_m1_id_a],
- updateQuery: {[metaFieldName]: {$eq: 2}},
- updateObj: doc_t2022_m2_stringId_stringA,
- resultDocList: [doc_t2023_m1_id_a],
- n: 0,
- upsert: false
- });
+ (function testUpsert() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ updateQuery: {[metaFieldName]: {$eq: 2}},
+ updateObj: doc_t2022_m2_id2_a2,
+ resultDocList: [doc_t2023_m1_id1_a1],
+ upsert: true,
+ upsertedDoc: doc_t2022_m2_id2_a2,
+ });
+ })();
+
+    // Tests upsert with full measurement: no upsert is performed when the query matches but the
+    // update is a no-op.
+ (function testNoopUpsert() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ updateQuery: {},
+ updateObj: {$unset: {z: ""}},
+ resultDocList: [doc_t2023_m1_id1_a1],
+ nMatched: 1,
+ nModified: 0,
+ upsert: true
+ });
+ })();
+
+ // Run a replacement upsert that includes an _id in the query.
+ (function testReplacementUpsertWithId() {
+ testUpdateOne({
+ initialDocList: [doc_t2023_m1_id1_a1],
+ updateQuery: {_id: 100},
+ updateObj: {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), a: 5},
+ upsert: true,
+ upsertedDoc: {_id: 100, [timeFieldName]: ISODate("2023-02-06T19:19:01Z"), a: 5},
+ resultDocList: [doc_t2023_m1_id1_a1],
+ });
+ })();
+}
+
+/**
+ * Tests upsert with multi:false.
+ */
+{
+ const dateTime = ISODate("2021-07-12T16:00:00Z");
+ const dateTimeUpdated = ISODate("2023-01-27T16:00:00Z");
+ const doc_id_1_a_b_no_metrics = {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ };
+
+ // Run an upsert that doesn't include an _id.
+ (function testUpsertWithNoId() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[metaFieldName]: {z: "Z"}},
+ updateObj: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ upsertedDoc: {[metaFieldName]: {z: "Z"}, [timeFieldName]: dateTime},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert that includes an _id.
+ (function testUpsertWithId() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {_id: 100},
+ updateObj: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ upsertedDoc: {_id: 100, [timeFieldName]: dateTime},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert that updates documents and skips the upsert.
+ (function testUpsertUpdatesDocs() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[metaFieldName + ".a"]: "A"},
+ updateObj: {$set: {f: 10}},
+ upsert: true,
+ resultDocList: [
+ {
+ _id: 1,
+ [timeFieldName]: dateTime,
+ [metaFieldName]: {a: "A", b: "B"},
+ f: 10,
+ },
+ ],
+ nMatched: 1,
+ });
+ })();
+
+ // Run an upsert that matches a bucket but no documents in it, and inserts the document into a
+ // bucket with the same parameters.
+ (function testUpsertIntoMatchedBucket() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[metaFieldName]: {a: "A", b: "B"}, f: 111},
+ updateObj: {$set: {[timeFieldName]: dateTime}},
+ upsert: true,
+ upsertedDoc: {[metaFieldName]: {a: "A", b: "B"}, [timeFieldName]: dateTime, f: 111},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert that doesn't insert a time field.
+ (function testUpsertNoTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[metaFieldName]: {z: "Z"}},
+ updateObj: {$set: {f: 10}},
+ upsert: true,
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
+
+ // Run an upsert where the time field is provided in the query.
+ (function testUpsertQueryOnTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[timeFieldName]: dateTimeUpdated},
+ updateObj: {$set: {f: 10}},
+ upsert: true,
+ upsertedDoc: {[timeFieldName]: dateTimeUpdated, f: 10},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert where a document to insert is supplied by the request.
+ (function testUpsertSupplyDoc() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[timeFieldName]: dateTimeUpdated},
+ updateObj: [{$set: {f: 10}}],
+ upsert: true,
+ upsertedDoc: {[timeFieldName]: dateTime, f: 100},
+ c: {new: {[timeFieldName]: dateTime, f: 100}},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ });
+ })();
+
+ // Run an upsert where a document to insert is supplied by the request and does not have a time
+ // field.
+ (function testUpsertSupplyDocNoTimeField() {
+ testUpdateOne({
+ initialDocList: [doc_id_1_a_b_no_metrics],
+ updateQuery: {[timeFieldName]: dateTimeUpdated},
+ updateObj: [{$set: {f: 10}}],
+ upsert: true,
+ c: {new: {[metaFieldName]: {a: "A"}, f: 100}},
+ resultDocList: [
+ doc_id_1_a_b_no_metrics,
+ ],
+ failCode: ErrorCodes.BadValue,
+ });
+ })();
}
/**
* Tests measurement modification that could exceed bucket size limit (default value of 128000
* bytes).
*/
-{
- const coll = testDB.getCollection(collNamePrefix + count++);
- assert.commandWorked(testDB.createCollection(
- coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
- count--; // Decrement count to access collection correctly in 'testUpdateOne' helper.
+(function testUpdateExceedsBucketSizeLimit() {
+ const testDB = getTestDB();
+ const collName = "testUpdateExceedsBucketSizeLimit";
+ const coll = testDB.getCollection(collName);
+ prepareCollection({collName, initialDocList: []});
// Fill up a bucket to roughly 120000 bytes by inserting 4 batches of 30 documents sized at
// 1000 bytes.
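+    // (4 batches * 30 documents * 1000 bytes = 120000 bytes, just under the default 128000-byte
+    // bucket size limit referenced above.)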
@@ -248,7 +506,7 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n
while (batchNum < 4) {
let batch = [];
for (let i = 0; i < 30; i++) {
- const doc = {_id: i, [timeField]: ISODate(), value: "a".repeat(1000)};
+ const doc = {_id: i, [timeFieldName]: ISODate(), value: "a".repeat(1000)};
batch.push(doc);
}
@@ -257,13 +515,10 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n
}
// Update any of the measurements with a document which will exceed the 128000 byte threshold.
- const chunkyDoc = {_id: 128000, [timeField]: ISODate(), value: "a".repeat(10000)};
- testUpdateOne({
- // initialDocList: We manually inserted measurements.
- updateQuery: {},
- updateObj: chunkyDoc,
- // resultDocList: No need to check all of the measurements.
- n: 1
- });
-}
+ const chunkyDoc = {_id: 128000, [timeFieldName]: ISODate(), value: "a".repeat(10000)};
+
+ const updateCommand = {update: collName, updates: [{q: {}, u: chunkyDoc, multi: false}]};
+ const res = assert.commandWorked(testDB.runCommand(updateCommand));
+ assert.eq(1, res.n, tojson(res));
+ assert.eq(1, res.nModified, tojson(res));
})();
diff --git a/jstests/core/timeseries/timeseries_user_system_buckets.js b/jstests/core/timeseries/timeseries_user_system_buckets.js
index 02ae0cb8f59d9..6835fca1864e8 100644
--- a/jstests/core/timeseries/timeseries_user_system_buckets.js
+++ b/jstests/core/timeseries/timeseries_user_system_buckets.js
@@ -13,8 +13,8 @@
* requires_fcv_63
* ]
*/
-userCollSystemBuckets = db.system.buckets.coll;
-userColl = db.coll;
+let userCollSystemBuckets = db.system.buckets.coll;
+let userColl = db.coll;
userCollSystemBuckets.drop();
userColl.drop();
@@ -25,10 +25,10 @@ assert.commandWorked(userCollSystemBuckets.insert({a: 1}));
// A user collection with the same postfix should not be considered time series collection
assert.commandWorked(userColl.insert({a: 2}));
-docs = userColl.find().toArray();
+let docs = userColl.find().toArray();
assert.eq(1, docs.length);
-docsSystemBuckets = userCollSystemBuckets.find().toArray();
+let docsSystemBuckets = userCollSystemBuckets.find().toArray();
assert.eq(1, docsSystemBuckets.length);
userCollSystemBuckets.drop();
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 02c1a3e0d1c85..ea671c46ed879 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -15,8 +15,6 @@
* tenant_migration_incompatible,
* does_not_support_repeated_reads,
* requires_fcv_62,
- * # TODO SERVER-67640: Verify 'top' and $collStats work correctly for queries in CQF.
- * cqf_incompatible,
* ]
*/
diff --git a/jstests/core/transaction_too_large_for_cache.js b/jstests/core/transaction_too_large_for_cache.js
index dddcbed9f4b49..b06ead39b621b 100644
--- a/jstests/core/transaction_too_large_for_cache.js
+++ b/jstests/core/transaction_too_large_for_cache.js
@@ -7,6 +7,7 @@
* requires_persistence,
* requires_non_retryable_writes,
* requires_wiredtiger,
+ *   no_selinux,
* ]
*/
diff --git a/jstests/core/txns/abort_expired_transaction.js b/jstests/core/txns/abort_expired_transaction.js
index b461856280e4b..a4ed72cc7dbe6 100644
--- a/jstests/core/txns/abort_expired_transaction.js
+++ b/jstests/core/txns/abort_expired_transaction.js
@@ -35,6 +35,11 @@ try {
const session = db.getMongo().startSession(sessionOptions);
const sessionDb = session.getDatabase(testDBName);
+ // Number of passes made by the "abortExpiredTransactions" thread before the transaction
+ // expires.
+ const abortExpiredTransactionsPassesPreAbort =
+ db.serverStatus().metrics.abortExpiredTransactions.passes;
+
let txnNumber = 0;
jsTest.log("Insert a document starting a transaction.");
@@ -66,6 +71,14 @@ try {
"currentOp reports that the idle transaction still exists, it has not been " +
"aborted as expected.");
+ assert.soon(() => {
+ // For this expired transaction to abort, the "abortExpiredTransactions" thread has to
+ // perform at least one pass.
+ const serverStatus = db.serverStatus();
+ return abortExpiredTransactionsPassesPreAbort <
+ serverStatus.metrics.abortExpiredTransactions.passes;
+ });
+
jsTest.log(
"Attempt to do a write in the transaction, which should fail because the transaction " +
"was aborted");
diff --git a/jstests/core/txns/aggregation_in_transaction.js b/jstests/core/txns/aggregation_in_transaction.js
index 76195d0caa042..1c4da78ab2025 100644
--- a/jstests/core/txns/aggregation_in_transaction.js
+++ b/jstests/core/txns/aggregation_in_transaction.js
@@ -1,5 +1,5 @@
// Tests that aggregation is supported in transactions.
-// @tags: [uses_transactions, uses_snapshot_read_concern]
+// @tags: [uses_transactions, uses_snapshot_read_concern, references_foreign_collection]
(function() {
"use strict";
diff --git a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
index a32cc59e255a9..49f03c6fddcb6 100644
--- a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
+++ b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
@@ -11,7 +11,7 @@
(function() {
"use strict";
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
+load("jstests/libs/fixture_helpers.js");
const session = db.getMongo().startSession();
const sessionDb = session.getDatabase("admin");
@@ -53,7 +53,7 @@ nonRetryableWriteCommands.forEach(function(command) {
[50768, 50889]);
});
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
nonRetryableWriteCommandsMongodOnly.forEach(function(command) {
jsTest.log("Testing command: " + tojson(command));
assert.commandFailedWithCode(
diff --git a/jstests/core/txns/commands_not_allowed_in_txn.js b/jstests/core/txns/commands_not_allowed_in_txn.js
index a261b14a0be25..45352c2710921 100644
--- a/jstests/core/txns/commands_not_allowed_in_txn.js
+++ b/jstests/core/txns/commands_not_allowed_in_txn.js
@@ -29,7 +29,7 @@ const sessionOptions = {
const session = db.getMongo().startSession(sessionOptions);
const sessionDb = session.getDatabase(dbName);
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
+const runningOnMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
assert.commandWorked(testDB.runCommand({
@@ -69,7 +69,7 @@ function testCommand(command) {
assert(res.errmsg.match(errmsgRegExp), res);
// Mongos has special handling for commitTransaction to support commit recovery.
- if (!isMongos) {
+ if (!runningOnMongos) {
assert.commandFailedWithCode(sessionDb.adminCommand({
commitTransaction: 1,
txnNumber: NumberLong(txnNumber),
@@ -134,7 +134,7 @@ const commands = [
];
// There is no applyOps command on mongos.
-if (!isMongos) {
+if (!runningOnMongos) {
commands.push({
applyOps: [{
op: "u",
@@ -164,7 +164,7 @@ assert.commandFailedWithCode(sessionDb.runCommand({
ErrorCodes.OperationNotSupportedInTransaction);
// Mongos has special handling for commitTransaction to support commit recovery.
-if (!isMongos) {
+if (!runningOnMongos) {
// The failed find should abort the transaction so a commit should fail.
assert.commandFailedWithCode(sessionDb.adminCommand({
commitTransaction: 1,
diff --git a/jstests/core/txns/concurrent_drops_and_creates.js b/jstests/core/txns/concurrent_drops_and_creates.js
index 71b7b83651160..7c7bfd6d58730 100644
--- a/jstests/core/txns/concurrent_drops_and_creates.js
+++ b/jstests/core/txns/concurrent_drops_and_creates.js
@@ -10,13 +10,9 @@
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
// TODO (SERVER-39704): Remove the following load after SERVER-39704 is completed
// For retryOnceOnTransientAndRestartTxnOnMongos.
load('jstests/libs/auto_retry_transaction_in_sharding.js');
-load("jstests/libs/feature_flag_util.js");
const dbName1 = "test1";
const dbName2 = "test2";
@@ -63,11 +59,8 @@ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}));
-// This test cause a StaleConfig error on sharding so even with the PointInTimeCatalogLookups flag
-// enabled no command will succeed.
-// TODO SERVER-67289: Remove feature flag check.
-if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups") &&
- !session.getClient().isMongos()) {
+// This test causes a StaleConfig error on sharding, so no command will succeed.
+if (!session.getClient().isMongos()) {
// We can perform reads on the dropped collection as it existed when we started the transaction.
assert.commandWorked(sessionDB2.runCommand({find: sessionCollB.getName()}));
@@ -112,25 +105,13 @@ assert.commandWorked(sessionCollA.insert({}));
sessionOutsideTxn.advanceClusterTime(session.getClusterTime());
assert.commandWorked(testDB2.runCommand({create: collNameB}));
-// TODO SERVER-67289: Remove feature flag check.
-if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) {
- // We can insert to collection B in the transaction as the transaction does not have a
- // collection on this namespace (even as it exist at latest). A collection will be implicitly
- // created and we will fail to commit this transaction with a WriteConflict error.
- retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
- assert.commandWorked(sessionCollB.insert({}));
- }, txnOptions);
+// We can insert into collection B in the transaction as the transaction does not have a collection
+// on this namespace (even though it exists at latest). A collection will be implicitly created and
+// we will fail to commit this transaction with a WriteConflict error.
+retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ assert.commandWorked(sessionCollB.insert({}));
+}, txnOptions);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict);
-} else {
- // We cannot write to collection B in the transaction, since it experienced catalog changes
- // since the transaction's read timestamp. Since our implementation of the in-memory collection
- // catalog always has the most recent collection metadata, we do not allow you to read from a
- // collection at a time prior to its most recent catalog changes.
- assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-}
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict);
-session.endSession();
-}());
+session.endSession();
\ No newline at end of file
diff --git a/jstests/core/txns/create_collection_parallel.js b/jstests/core/txns/create_collection_parallel.js
index cbbb968c88fef..e0f0c5bd42327 100644
--- a/jstests/core/txns/create_collection_parallel.js
+++ b/jstests/core/txns/create_collection_parallel.js
@@ -7,12 +7,8 @@
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/create_collection_txn_helpers.js");
load("jstests/libs/auto_retry_transaction_in_sharding.js");
-load("jstests/libs/feature_flag_util.js");
const dbName = 'test_txns_create_collection_parallel';
@@ -66,23 +62,14 @@ function runParallelCollectionCreateTest(command, explicitCreate) {
session.commitTransaction();
assert.eq(sessionColl.find({}).itcount(), 1);
- // TODO SERVER-67289: Remove feature flag check.
- if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) {
- // create cannot observe the collection created in the other transaction so the command
- // will succeed and we will instead throw WCE when trying to commit the transaction.
- retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
- assert.commandWorked(secondSessionDB.runCommand({create: collName}));
- }, {writeConcern: {w: "majority"}});
-
- assert.commandFailedWithCode(secondSession.commitTransaction_forTesting(),
- ErrorCodes.WriteConflict);
- } else {
- assert.commandFailedWithCode(secondSessionDB.runCommand({create: collName}),
- ErrorCodes.NamespaceExists);
+ // create cannot observe the collection created in the other transaction so the command will
+ // succeed and we will instead throw WCE when trying to commit the transaction.
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ assert.commandWorked(secondSessionDB.runCommand({create: collName}));
+ }, {writeConcern: {w: "majority"}});
- assert.commandFailedWithCode(secondSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
+ assert.commandFailedWithCode(secondSession.commitTransaction_forTesting(),
+ ErrorCodes.WriteConflict);
assert.eq(distinctSessionColl.find({}).itcount(), 0);
sessionColl.drop({writeConcern: {w: "majority"}});
@@ -195,5 +182,4 @@ runParallelCollectionCreateTest("insert", false /*explicitCreate*/);
runParallelCollectionCreateTest("update", true /*explicitCreate*/);
runParallelCollectionCreateTest("update", false /*explicitCreate*/);
runParallelCollectionCreateTest("findAndModify", true /*explicitCreate*/);
-runParallelCollectionCreateTest("findAndModify", false /*explicitCreate*/);
-}());
+runParallelCollectionCreateTest("findAndModify", false /*explicitCreate*/);
\ No newline at end of file
diff --git a/jstests/core/txns/create_indexes_parallel.js b/jstests/core/txns/create_indexes_parallel.js
index d04116482958d..7b6bc1fde0d15 100644
--- a/jstests/core/txns/create_indexes_parallel.js
+++ b/jstests/core/txns/create_indexes_parallel.js
@@ -7,12 +7,8 @@
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/auto_retry_transaction_in_sharding.js");
load("jstests/libs/create_index_txn_helpers.js");
-load("jstests/libs/feature_flag_util.js");
let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
const dbName = 'test_txns_create_indexes_parallel';
@@ -93,26 +89,14 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
assert.eq(secondSessionColl.find({}).itcount(), 1);
assert.eq(secondSessionColl.getIndexes().length, 2);
- // TODO SERVER-67289: Remove feature flag check.
- if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) {
- // createIndexes cannot observe the index created in the other transaction so the command
- // will succeed and we will instead throw WCE when trying to commit the transaction.
- retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
- assert.commandWorked(sessionColl.runCommand(
- {createIndexes: collName, indexes: [conflictingIndexSpecs]}));
- }, {writeConcern: {w: "majority"}});
-
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConflict);
- } else {
- // createIndexes takes minimum visible snapshots of new collections into consideration when
- // checking for existing indexes.
- assert.commandFailedWithCode(
- sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}),
- ErrorCodes.SnapshotUnavailable);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
+ // createIndexes cannot observe the index created in the other transaction so the command will
+ // succeed and we will instead throw WCE when trying to commit the transaction.
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ assert.commandWorked(
+ sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}));
+ }, {writeConcern: {w: "majority"}});
+
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict);
assert.eq(sessionColl.find({}).itcount(), 1);
assert.eq(sessionColl.getIndexes().length, 2);
@@ -207,5 +191,4 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
doParallelCreateIndexesTest(false /*explicitCollectionCreate*/, false /*multikeyIndex*/);
doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, false /*multikeyIndex*/);
doParallelCreateIndexesTest(false /*explicitCollectionCreate*/, true /*multikeyIndex*/);
-doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, true /*multikeyIndex*/);
-}());
+doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, true /*multikeyIndex*/);
\ No newline at end of file
diff --git a/jstests/core/txns/dbstats_not_blocked_by_txn.js b/jstests/core/txns/dbstats_not_blocked_by_txn.js
index 6555a216e1497..3eaec86df82e3 100644
--- a/jstests/core/txns/dbstats_not_blocked_by_txn.js
+++ b/jstests/core/txns/dbstats_not_blocked_by_txn.js
@@ -7,6 +7,8 @@
*/
(function() {
"use strict";
+load("jstests/libs/fixture_helpers.js");
+
var dbName = 'dbstats_not_blocked_by_txn';
var mydb = db.getSiblingDB(dbName);
@@ -16,8 +18,7 @@ mydb.createCollection("foo", {writeConcern: {w: "majority"}});
var session = db.getMongo().startSession();
var sessionDb = session.getDatabase(dbName);
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
-if (isMongos) {
+if (FixtureHelpers.isMongos(db)) {
// Before starting the transaction below, access the collection so it can be implicitly
// sharded and force all shards to refresh their database versions because the refresh
// requires an exclusive lock and would block behind the transaction.
diff --git a/jstests/core/txns/list_collections_not_blocked_by_txn.js b/jstests/core/txns/list_collections_not_blocked_by_txn.js
index 1ef9bb17b386e..7b615c076f8ce 100644
--- a/jstests/core/txns/list_collections_not_blocked_by_txn.js
+++ b/jstests/core/txns/list_collections_not_blocked_by_txn.js
@@ -9,6 +9,7 @@
// TODO (SERVER-39704): Remove the following load after SERVER-39704 is completed
// For withTxnAndAutoRetryOnMongos.
load('jstests/libs/auto_retry_transaction_in_sharding.js');
+load("jstests/libs/fixture_helpers.js");
var dbName = 'list_collections_not_blocked';
var mydb = db.getSiblingDB(dbName);
@@ -19,8 +20,7 @@ mydb.foo.drop({writeConcern: {w: "majority"}});
assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}}));
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
-if (isMongos) {
+if (FixtureHelpers.isMongos(db)) {
// Before starting the transaction below, access the collection so it can be implicitly
// sharded and force all shards to refresh their database versions because the refresh
// requires an exclusive lock and would block behind the transaction.
diff --git a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
index d3c26b49de884..81f6c0c579017 100644
--- a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
+++ b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js
@@ -112,4 +112,4 @@ assert.docEq(doc1, testColl.findOne(doc1));
assert.docEq(doc1, sessionColl.findOne(doc1));
assert.docEq(doc2, testColl.findOne(doc2));
assert.docEq(doc2, sessionColl.findOne(doc2));
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
index 75310ac65d2ba..083a2b978cd2f 100644
--- a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
+++ b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js
@@ -6,6 +6,7 @@
// TODO(SERVER-39704): Remove the following load after SERVER-39704 is completed
// For withTxnAndAutoRetryOnMongos.
load('jstests/libs/auto_retry_transaction_in_sharding.js');
+load("jstests/libs/fixture_helpers.js");
const dbName = 'noop_createIndexes_not_blocked';
const collName = 'test';
@@ -16,8 +17,7 @@ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDB = session.getDatabase(dbName);
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
-if (isMongos) {
+if (FixtureHelpers.isMongos(db)) {
// Access the collection before creating indexes so it can be implicitly sharded.
assert.eq(sessionDB[collName].find().itcount(), 0);
}
diff --git a/jstests/core/txns/prepare_conflict.js b/jstests/core/txns/prepare_conflict.js
index e338461dc14ce..4aec0afab33f4 100644
--- a/jstests/core/txns/prepare_conflict.js
+++ b/jstests/core/txns/prepare_conflict.js
@@ -7,9 +7,6 @@
* uses_prepare_transaction,
* uses_transactions,
* uses_parallel_shell,
- * # TODO SERVER-70847: Snapshot reads do not succeed on non-conflicting documents while txn is
- * # in prepare.
- * cqf_incompatible,
* requires_profiling,
* ]
*/
diff --git a/jstests/core/txns/prepare_conflict_aggregation_behavior.js b/jstests/core/txns/prepare_conflict_aggregation_behavior.js
index 17c5a79811a41..f1e159df32929 100644
--- a/jstests/core/txns/prepare_conflict_aggregation_behavior.js
+++ b/jstests/core/txns/prepare_conflict_aggregation_behavior.js
@@ -5,7 +5,11 @@
*
* The test runs commands that are not allowed with security token: endSession, prepareTransaction.
* @tags: [
- * not_allowed_with_security_token,uses_transactions, uses_prepare_transaction]
+ * not_allowed_with_security_token,
+ * references_foreign_collection,
+ * uses_transactions,
+ * uses_prepare_transaction,
+ * ]
*/
(function() {
"use strict";
diff --git a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
index e1d3b74595689..a9be6e6c7b2df 100644
--- a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
+++ b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js
@@ -6,9 +6,9 @@
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
-const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl";
+const dbName = "not_block_non_conflicting_ddl";
const collName = "transactions_collection";
-const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other";
+const otherDBName = "not_block_non_conflicting_ddl_other";
const otherCollName = "transactions_collection_other";
const testDB = db.getSiblingDB(dbName);
const otherDB = db.getSiblingDB(otherDBName);
diff --git a/jstests/core/txns/read_concern.js b/jstests/core/txns/read_concern.js
index fac40d9bcb540..460f705c7675d 100644
--- a/jstests/core/txns/read_concern.js
+++ b/jstests/core/txns/read_concern.js
@@ -113,4 +113,4 @@ assert.commandFailedWithCode(
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js
index b5a56b31870a5..14e64897cf2f8 100644
--- a/jstests/core/txns/statement_ids_accepted.js
+++ b/jstests/core/txns/statement_ids_accepted.js
@@ -8,6 +8,7 @@
// # Tenant migrations don't support applyOps.
// tenant_migration_incompatible
// ]
+load("jstests/libs/fixture_helpers.js");
(function() {
"use strict";
@@ -172,8 +173,7 @@ assert.commandWorked(sessionDb.adminCommand({
autocommit: false
}));
-const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";
-if (!isMongos) {
+if (!FixtureHelpers.isMongos(db)) {
// Skip commands that do not exist on mongos.
jsTestLog("Check that prepareTransaction accepts a statement ID");
diff --git a/jstests/core/txns/timeseries_insert_in_txn.js b/jstests/core/txns/timeseries_insert_in_txn.js
index 299a94dccc1dd..62ab51c3c38c8 100644
--- a/jstests/core/txns/timeseries_insert_in_txn.js
+++ b/jstests/core/txns/timeseries_insert_in_txn.js
@@ -5,10 +5,7 @@
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
TimeseriesTest.run((insert) => {
// Use a custom database, to avoid conflict with other tests that use the system.js collection.
@@ -28,5 +25,4 @@ TimeseriesTest.run((insert) => {
ErrorCodes.OperationNotSupportedInTransaction);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
index 481a1693b8bf6..7e8832fec516a 100644
--- a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
+++ b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
@@ -12,7 +12,7 @@
* command_not_supported_in_serverless,
* # TODO SERVER-70847: Snapshot reads do not succeed on non-conflicting documents while txn is
* # in prepare.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
(function() {
diff --git a/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js b/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js
index 4af433edefca6..fc8406f969202 100644
--- a/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js
+++ b/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js
@@ -6,10 +6,7 @@
* uses_snapshot_read_concern
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const session = db.getMongo().startSession();
@@ -58,5 +55,4 @@ assert.eq(systemColl.find().itcount(), 1);
jsTestLog("Testing aggregate.");
assert.eq(systemColl.aggregate([{$match: {}}]).itcount(), 1);
-assert.commandWorked(session.commitTransaction_forTesting());
-}());
+assert.commandWorked(session.commitTransaction_forTesting());
\ No newline at end of file
diff --git a/jstests/core/type_bracket.js b/jstests/core/type_bracket.js
index 2381f6e149619..a51f5f2dfad46 100644
--- a/jstests/core/type_bracket.js
+++ b/jstests/core/type_bracket.js
@@ -2,7 +2,6 @@
"use strict";
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq
-load('jstests/libs/optimizer_utils.js'); // For checkCascadesOptimizerEnabled
const t = db.type_bracket;
t.drop();
@@ -115,27 +114,45 @@ let tests = [
{filter: {a: {$gte: new Code("")}}, expected: [docs[28]]},
{filter: {a: {$lte: new Code("")}}, expected: []},
+ // MinKey/MaxKey
+ {filter: {a: {$lte: MinKey()}}, expected: [docs[0]]},
+ {filter: {a: {$lt: MinKey()}}, expected: []},
+ {filter: {a: {$gte: MaxKey()}}, expected: [docs[31]]},
+ {filter: {a: {$gt: MaxKey()}}, expected: []}
];
-// Include Min/MaxKey type bracketing tests conditional on using CQF.
-// TODO SERVER-68274: Always include these testcases once SBE correctly handles the semantics of
+// Currently, depending on which query engine is used, documents which are missing 'a' may or may
+// not be returned when comparing 'a' against MinKey/MaxKey. For example, for query
+// {a: {$gte: MinKey()}}, classic and CQF correctly return documents missing 'a', but SBE does not.
+// TODO SERVER-68274: Restrict these testcases once SBE correctly handles the semantics of
// missing fields and type bracketing (missing field is implicitly null which is greater than
// MinKey).
-if (checkCascadesOptimizerEnabled(db)) {
- tests.push(
- // MinKey
- {filter: {a: {$gte: MinKey()}}, expected: docs},
- {filter: {a: {$gt: MinKey()}}, expected: docs.slice(1)},
- {filter: {a: {$lte: MinKey()}}, expected: [docs[0]]},
- {filter: {a: {$lt: MinKey()}}, expected: []},
- // MaxKey
- {filter: {a: {$lte: MaxKey()}}, expected: docs},
- {filter: {a: {$lt: MaxKey()}}, expected: docs.slice(0, 31)},
- {filter: {a: {$gte: MaxKey()}}, expected: [docs[31]]},
- {filter: {a: {$gt: MaxKey()}}, expected: []});
-}
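+// 'docsWithA' is 'docs' with the document that is missing the 'a' field removed; the tests below
+// accept either result set since, per the comment above, engines differ on whether that document
+// is returned.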
+let docsWithA = docs.slice();
+docsWithA.splice(29, 1);
+
+tests.push(
+ // MinKey
+ {filter: {a: {$gte: MinKey()}}, expectedList: [docs, docsWithA]},
+ {filter: {a: {$gt: MinKey()}}, expectedList: [docs.slice(1), docsWithA.slice(1)]},
+
+ // MaxKey
+ {filter: {a: {$lte: MaxKey()}}, expectedList: [docs, docsWithA]},
+ {filter: {a: {$lt: MaxKey()}}, expectedList: [docs.slice(0, 31), docsWithA.slice(0, 30)]});
for (const testData of tests) {
- runTest(testData.filter, testData.expected);
+ if (testData.hasOwnProperty("expected")) {
+ runTest(testData.filter, testData.expected);
+ } else {
+ const result = t.aggregate({$match: testData.filter}).toArray();
+ let foundMatch = false;
+ for (let i = 0; i < testData.expectedList.length; i++) {
+ const expected = testData.expectedList[i];
+ foundMatch |= arrayEq(result, expected);
+ }
+ assert(foundMatch,
+ `Actual query result did not match any of the expected options. filter=${
+ tojson(testData.filter)}, actual=${tojson(result)}, expectedList=${
+ tojson(testData.expectedList)}`);
+ }
}
}());
diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js
index 8c90068c81e13..3fee8f63e0332 100644
--- a/jstests/core/views/invalid_system_views.js
+++ b/jstests/core/views/invalid_system_views.js
@@ -16,13 +16,15 @@
* # The drop of offending views may not happen on the donor after a committed migration.
* tenant_migration_incompatible,
* uses_compact,
+ * references_foreign_collection,
* ]
*/
+load("jstests/libs/fixture_helpers.js");
(function() {
"use strict";
-const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
-const isStandalone = !isMongos && !db.runCommand({hello: 1}).hasOwnProperty("setName");
+const runningOnMongos = FixtureHelpers.isMongos(db);
+const isStandalone = !runningOnMongos && !db.runCommand({hello: 1}).hasOwnProperty("setName");
function runTest(badViewDefinition) {
let viewsDB = db.getSiblingDB("invalid_system_views");
@@ -52,7 +54,7 @@ function runTest(badViewDefinition) {
" in system.views";
}
- if (!isMongos) {
+ if (!runningOnMongos) {
// Commands that run on existing regular collections should not be impacted by the
// presence of invalid views. However, applyOps doesn't work on mongos.
assert.commandWorked(
@@ -107,7 +109,7 @@ function runTest(badViewDefinition) {
}
const storageEngine = jsTest.options().storageEngine;
- if (isMongos || storageEngine === "inMemory") {
+ if (runningOnMongos || storageEngine === "inMemory") {
print("Not testing compact command on mongos or ephemeral storage engine");
} else {
assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}),
diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js
index 50b2edfd4a72d..235fbe40cb939 100644
--- a/jstests/core/views/views_aggregation.js
+++ b/jstests/core/views/views_aggregation.js
@@ -9,6 +9,7 @@
* requires_non_retryable_commands,
* # Explain of a resolved view must be executed by mongos.
* directly_against_shardsvrs_incompatible,
+ * references_foreign_collection,
* ]
*/
(function() {
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 5cb25656424fc..579ee1a1aadda 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -76,6 +76,7 @@
"use strict";
load('jstests/sharding/libs/last_lts_mongod_commands.js');
+load('jstests/sharding/libs/last_lts_mongos_commands.js');
// Pre-written reasons for skipping a test.
const isAnInternalCommand = "internal command";
@@ -116,20 +117,21 @@ let viewsCommandTests = {
_configsvrMoveRange: {skip: isAnInternalCommand},
_configsvrRefineCollectionShardKey: {skip: isAnInternalCommand},
_configsvrRenameCollection: {skip: isAnInternalCommand},
- _configsvrRenameCollectionMetadata: {skip: isAnInternalCommand},
_configsvrRemoveChunks: {skip: isAnInternalCommand},
_configsvrRemoveShard: {skip: isAnInternalCommand},
_configsvrRemoveShardFromZone: {skip: isAnInternalCommand},
_configsvrRemoveTags: {skip: isAnInternalCommand},
_configsvrRepairShardedCollectionChunksHistory: {skip: isAnInternalCommand},
+ _configsvrResetPlacementHistory: {skip: isAnInternalCommand},
_configsvrReshardCollection: {skip: isAnInternalCommand},
_configsvrRunRestore: {skip: isAnInternalCommand},
_configsvrSetAllowMigrations: {skip: isAnInternalCommand},
_configsvrSetClusterParameter: {skip: isAnInternalCommand},
_configsvrSetUserWriteBlockMode: {skip: isAnInternalCommand},
- _configsvrTransitionToCatalogShard: {skip: isAnInternalCommand},
+ _configsvrTransitionFromDedicatedConfigServer: {skip: isAnInternalCommand},
_configsvrTransitionToDedicatedConfigServer: {skip: isAnInternalCommand},
_configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand},
+ _dropConnectionsToMongot: {skip: isAnInternalCommand},
_flushDatabaseCacheUpdates: {skip: isUnrelated},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isUnrelated},
_flushReshardingStateChange: {skip: isUnrelated},
@@ -143,6 +145,7 @@ let viewsCommandTests = {
_killOperations: {skip: isUnrelated},
_mergeAuthzCollections: {skip: isAnInternalCommand},
_migrateClone: {skip: isAnInternalCommand},
+ _mongotConnPoolStats: {skip: isAnInternalCommand},
_movePrimary: {skip: isAnInternalCommand},
_movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand},
_movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand},
@@ -156,11 +159,10 @@ let viewsCommandTests = {
_shardsvrAbortReshardCollection: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand},
+ _shardsvrCleanupStructuredEncryptionData: {skip: isAnInternalCommand},
_shardsvrCloneCatalogData: {skip: isAnInternalCommand},
_shardsvrCompactStructuredEncryptionData: {skip: isAnInternalCommand},
_shardsvrDropCollection: {skip: isAnInternalCommand},
- // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS.
- _shardsvrDropCollectionIfUUIDNotMatching: {skip: isUnrelated},
_shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isUnrelated},
_shardsvrDropCollectionParticipant: {skip: isAnInternalCommand},
_shardsvrDropIndexCatalogEntryParticipant: {skip: isAnInternalCommand},
@@ -207,7 +209,14 @@ let viewsCommandTests = {
_shardsvrCollModParticipant: {skip: isAnInternalCommand},
_shardsvrParticipantBlock: {skip: isAnInternalCommand},
_shardsvrUnregisterIndex: {skip: isAnInternalCommand},
- _startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamSample: {skip: isAnInternalCommand},
+ streams_stopStreamProcessor: {skip: isAnInternalCommand},
+ streams_listStreamProcessors: {skip: isAnInternalCommand},
+ streams_getMoreStreamSample: {skip: isAnInternalCommand},
+ streams_getStats: {skip: isAnInternalCommand},
+ streams_testOnlyInsert: {skip: isAnInternalCommand},
+ streams_getMetrics: {skip: isAnInternalCommand},
_transferMods: {skip: isAnInternalCommand},
_vectorClockPersist: {skip: isAnInternalCommand},
abortReshardCollection: {skip: isUnrelated},
@@ -256,12 +265,16 @@ let viewsCommandTests = {
command: {captrunc: "view", n: 2, inc: false},
expectFailure: true,
},
- checkMetadataConsistency: {skip: isUnrelated},
+ checkMetadataConsistency: {
+ command: {checkMetadataConsistency: "view"},
+ expectFailure: false,
+ },
checkShardingIndex: {skip: isUnrelated},
cleanupOrphaned: {
skip: "Tested in views/views_sharded.js",
},
cleanupReshardCollection: {skip: isUnrelated},
+ cleanupStructuredEncryptionData: {skip: isUnrelated},
clearJumboFlag: {
command: {clearJumboFlag: "test.view"},
skipStandalone: true,
@@ -276,6 +289,7 @@ let viewsCommandTests = {
},
clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"},
clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"},
+ clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"},
clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"},
clusterCount: {skip: "already tested by 'count' tests on mongos"},
clusterDelete: {skip: "already tested by 'delete' tests on mongos"},
@@ -292,7 +306,7 @@ let viewsCommandTests = {
configureFailPoint: {skip: isUnrelated},
configureCollectionBalancing: {skip: isUnrelated},
configureQueryAnalyzer: {
- command: {configureQueryAnalyzer: "test.view", mode: "full", sampleRate: 1},
+ command: {configureQueryAnalyzer: "test.view", mode: "full", samplesPerSecond: 1},
skipStandalone: true,
expectFailure: true,
isAdminCommand: true,
@@ -611,6 +625,7 @@ let viewsCommandTests = {
replSetTestEgress: {skip: isUnrelated},
replSetUpdatePosition: {skip: isUnrelated},
replSetResizeOplog: {skip: isUnrelated},
+ resetPlacementHistory: {skip: isUnrelated},
reshardCollection: {
command: {reshardCollection: "test.view", key: {_id: 1}},
setup: function(conn) {
@@ -718,7 +733,7 @@ let viewsCommandTests = {
testVersion2: {skip: isAnInternalCommand},
testVersions1And2: {skip: isAnInternalCommand},
top: {skip: "tested in views/views_stats.js"},
- transitionToCatalogShard: {skip: isUnrelated},
+ transitionFromDedicatedConfigServer: {skip: isUnrelated},
transitionToDedicatedConfigServer: {skip: isUnrelated},
update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true},
updateRole: {
@@ -756,6 +771,10 @@ commandsRemovedFromMongodSinceLastLTS.forEach(function(cmd) {
viewsCommandTests[cmd] = {skip: "must define test coverage for backwards compatibility"};
});
+commandsRemovedFromMongosSinceLastLTS.forEach(function(cmd) {
+ viewsCommandTests[cmd] = {skip: "must define test coverage for backwards compatibility"};
+});
+
/**
* Helper function for failing commands or writes that checks the result 'res' of either.
* If 'code' is null we only check for failure, otherwise we confirm error code matches as
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index 169e930974044..c661c38b2cf4c 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -6,15 +6,13 @@
// requires_non_retryable_commands,
// # Explain of a resolved view must be executed by mongos.
// directly_against_shardsvrs_incompatible,
+// references_foreign_collection,
// ]
/**
* Tests the behavior of operations when interacting with a view's default collation.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
let viewsDB = db.getSiblingDB("views_collation");
assert.commandWorked(viewsDB.dropDatabase());
@@ -518,5 +516,4 @@ assert.commandWorked(findRes);
assert.eq(3, findRes.cursor.firstBatch.length);
explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}});
assert.neq(null, explain.queryPlanner, tojson(explain));
-assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
-}());
+assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain));
\ No newline at end of file
diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js
index 92b05ef43ea50..b9d465a3638d1 100644
--- a/jstests/core/views/views_count.js
+++ b/jstests/core/views/views_count.js
@@ -8,10 +8,7 @@
// requires_fcv_63,
// ]
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const sbeEnabled = checkSBEEnabled(db);
@@ -94,5 +91,4 @@ assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"}));
assert.commandFailedWithCode(
viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}),
- ErrorCodes.OptionNotSupportedOnView);
-}());
+ ErrorCodes.OptionNotSupportedOnView);
\ No newline at end of file
diff --git a/jstests/core/views/views_creation.js b/jstests/core/views/views_creation.js
index fa6c8eb75941d..b06ff90bbb502 100644
--- a/jstests/core/views/views_creation.js
+++ b/jstests/core/views/views_creation.js
@@ -5,6 +5,7 @@
* assumes_superuser_permissions,
* # TODO SERVER-73967: Remove this tag.
* does_not_support_stepdowns,
+ * references_foreign_collection,
* ]
*/
(function() {
@@ -111,12 +112,6 @@ assert.commandFailedWithCode(viewsDB.runCommand({
}),
40600);
-// The remainder of this test will not work on server versions < 7.0 as the 'create' command
-// is not idempotent there. TODO SERVER-74062: remove this.
-if (db.version().split('.')[0] < 7) {
- return;
-}
-
// Test that creating a view which already exists with identical options reports success.
let repeatedCmd = {
create: "existingViewTest",
@@ -158,4 +153,8 @@ assert.commandFailedWithCode(viewsDB.runCommand({
// Test that creating a view when there is already a collection with the same name fails.
assert.commandFailedWithCode(viewsDB.runCommand({create: "collection", viewOn: "collection"}),
ErrorCodes.NamespaceExists);
+
+// Ensure we accept a view with a name longer than 64 characters (the maximum dbname length).
+assert.commandWorked(viewsDB.createView(
+ "longNamedView", "Queries_IdentityView_UnindexedLargeInMatching0_BackingCollection", []));
}());
diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js
index b5d019485cc5f..a5ef205fcb086 100644
--- a/jstests/core/views/views_distinct.js
+++ b/jstests/core/views/views_distinct.js
@@ -5,15 +5,14 @@
* assumes_unsharded_collection,
* # Explain of a resolved view must be executed by mongos.
* directly_against_shardsvrs_incompatible,
+ * requires_fcv_71,
* ]
*/
-(function() {
-"use strict";
-
// For arrayEq. We don't use array.eq as it does an ordered comparison on arrays but we don't
// care about order in the distinct response.
load("jstests/aggregation/extras/utils.js");
+import {getWinningPlan, getPlanStage} from "jstests/libs/analyze_plan.js";
var viewsDB = db.getSiblingDB("views_distinct");
assert.commandWorked(viewsDB.dropDatabase());
@@ -93,6 +92,27 @@ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats"));
assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2);
assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution"));
+// Distinct with hints work on views.
+assert.commandWorked(viewsDB.coll.createIndex({state: 1}));
+
+explainPlan = largePopView.explain().distinct("pop", {}, {hint: {state: 1}});
+assert(getPlanStage(explainPlan.stages[0].$cursor, "FETCH"));
+assert(getPlanStage(explainPlan.stages[0].$cursor, "IXSCAN"));
+
+explainPlan = largePopView.explain().distinct("pop");
+assert.neq(getWinningPlan(explainPlan.stages[0].$cursor.queryPlanner).stage,
+ "IXSCAN",
+ tojson(explainPlan));
+
+// Make sure that the hint produces the right results.
+assert(arrayEq([10, 7], largePopView.distinct("pop", {}, {hint: {state: 1}})));
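// (Taken together, the checks above verify the hint is honored without changing results: the
// hinted distinct's $cursor plan contains IXSCAN and FETCH stages, the unhinted winning plan's
// root stage is not IXSCAN, and the hinted distinct still returns the expected values [10, 7].)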
+
+explainPlan =
+ largePopView.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {bad: 1, hint: 1}});
+assert.commandFailedWithCode(explainPlan, ErrorCodes.BadValue, tojson(explainPlan));
+var regex = new RegExp("hint provided does not correspond to an existing index");
+assert(regex.test(explainPlan.errmsg));
+
// Distinct commands fail when they try to change the collation of a view.
assert.commandFailedWithCode(
viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}),
@@ -145,5 +165,4 @@ assert.commandWorked(coll.insert({a: "not leaf"}));
assertIdentityViewDistinctMatchesCollection("a");
assertIdentityViewDistinctMatchesCollection("a.b");
assertIdentityViewDistinctMatchesCollection("a.b.c");
-assertIdentityViewDistinctMatchesCollection("a.b.c.d");
-}());
+assertIdentityViewDistinctMatchesCollection("a.b.c.d");
\ No newline at end of file
diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js
index 3bda9eec2502a..623ef6b4dfd6e 100644
--- a/jstests/core/views/views_stats.js
+++ b/jstests/core/views/views_stats.js
@@ -17,6 +17,7 @@
(function() {
"use strict";
load("jstests/libs/stats.js");
+load("jstests/libs/fixture_helpers.js");
let viewsDB = db.getSiblingDB("views_stats");
assert.commandWorked(viewsDB.dropDatabase());
@@ -42,9 +43,7 @@ lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
assert.writeError(view.update({}, {}));
lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0);
-let helloResponse = assert.commandWorked(viewsDB.runCommand("hello"));
-const isMongos = (helloResponse.msg === "isdbgrid");
-if (isMongos) {
+if (FixtureHelpers.isMongos(viewsDB)) {
jsTest.log("Tests are being run on a mongos; skipping top tests.");
return;
}
diff --git a/jstests/core/views/views_validation.js b/jstests/core/views/views_validation.js
index 02c060fd50a82..4dea7c9491bed 100644
--- a/jstests/core/views/views_validation.js
+++ b/jstests/core/views/views_validation.js
@@ -2,6 +2,7 @@
// # Running getCollection on views in sharded suites tries to shard views, which fails.
// assumes_unsharded_collection,
// requires_non_retryable_commands,
+// references_foreign_collection,
// ]
(function() {
diff --git a/jstests/core/write/autoid.js b/jstests/core/write/autoid.js
index 679b109fcc513..1797e3d7bafa9 100644
--- a/jstests/core/write/autoid.js
+++ b/jstests/core/write/autoid.js
@@ -3,14 +3,14 @@
// key.
// @tags: [assumes_unsharded_collection]
-f = db.jstests_autoid;
+let f = db.jstests_autoid;
f.drop();
f.save({z: 1});
-a = f.findOne({z: 1});
+let a = f.findOne({z: 1});
f.update({z: 1}, {z: 2});
-b = f.findOne({z: 2});
+let b = f.findOne({z: 2});
assert.eq(a._id.str, b._id.str);
-c = f.update({z: 2}, {z: "abcdefgabcdefgabcdefg"});
+let c = f.update({z: 2}, {z: "abcdefgabcdefgabcdefg"});
c = f.findOne({});
assert.eq(a._id.str, c._id.str);
diff --git a/jstests/core/write/batch_write_command_w0.js b/jstests/core/write/batch_write_command_w0.js
index 7d5038120b60b..4b6bcb021684b 100644
--- a/jstests/core/write/batch_write_command_w0.js
+++ b/jstests/core/write/batch_write_command_w0.js
@@ -33,12 +33,8 @@ coll.drop();
//
// Single document insert, w:0 write concern specified, missing ordered
coll.drop();
-request = {
- insert: coll.getName(),
- documents: [{a: 1}],
- writeConcern: {w: 0}
-};
-result = coll.runCommand(request);
+let request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 0}};
+let result = coll.runCommand(request);
assert.eq({ok: 1}, result);
countEventually(coll, 1);
diff --git a/jstests/core/write/bulk/bulk_write.js b/jstests/core/write/bulk/bulk_write.js
index a48bd6d11213b..f29e4077eb211 100644
--- a/jstests/core/write/bulk/bulk_write.js
+++ b/jstests/core/write/bulk/bulk_write.js
@@ -1,26 +1,17 @@
/**
- * Tests bulk write command for valid / invalid input.
+ * Tests bulk write command for valid input.
*
* The test runs commands that are not allowed with security token: bulkWrite.
* @tags: [
* assumes_against_mongod_not_mongos,
* not_allowed_with_security_token,
- * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes.
- * requires_non_retryable_writes,
- * # Command is not yet compatible with tenant migration.
- * tenant_migration_incompatible,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
* ]
*/
(function() {
"use strict";
-load("jstests/libs/feature_flag_util.js");
-
-// Skip this test if the BulkWriteCommand feature flag is not enabled
-// TODO SERVER-67711: Remove feature flag check.
-if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) {
- jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.');
- return;
-}
var coll = db.getCollection("coll");
var coll1 = db.getCollection("coll1");
@@ -35,52 +26,32 @@ assert.eq(coll.find().itcount(), 1);
assert.eq(coll1.find().itcount(), 0);
coll.drop();
-// Make sure non-adminDB request fails
-assert.commandFailedWithCode(db.runCommand({
- bulkWrite: 1,
- ops: [{insert: 0, document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}]
-}),
- [ErrorCodes.Unauthorized]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
// Make sure optional fields are accepted
-assert.commandWorked(db.adminCommand({
+var res = db.adminCommand({
bulkWrite: 1,
ops: [{insert: 0, document: {skey: "MongoDB"}}],
nsInfo: [{ns: "test.coll"}],
cursor: {batchSize: 1024},
bypassDocumentValidation: true,
ordered: false
-}));
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
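// bulkWrite reports per-operation failures through the reply cursor and the top-level numErrors
// field rather than by failing the whole command, so commandWorked alone does not prove every op
// succeeded; asserting numErrors is 0 does. (bulk_write_non_transaction.js in this patch shows
// ok: 1 replies carrying numErrors: 1 when an individual op fails.)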
assert.eq(coll.find().itcount(), 1);
assert.eq(coll1.find().itcount(), 0);
coll.drop();
-// Make sure invalid fields are not accepted
-assert.commandFailedWithCode(db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: 0, document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}],
- cursor: {batchSize: 1024},
- bypassDocumentValidation: true,
- ordered: false,
- fooField: 0
-}),
- [40415]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
// Make sure ops and nsInfo can take arrays properly
-assert.commandWorked(db.adminCommand({
+res = db.adminCommand({
bulkWrite: 1,
ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
-}));
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
assert.eq(coll.find().itcount(), 1);
assert.eq(coll1.find().itcount(), 1);
@@ -88,144 +59,32 @@ coll.drop();
coll1.drop();
// Test 2 inserts into the same namespace
-assert.commandWorked(db.adminCommand({
+res = db.adminCommand({
bulkWrite: 1,
ops: [{insert: 0, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
nsInfo: [{ns: "test.coll"}]
-}));
-
-assert.eq(coll.find().itcount(), 2);
-assert.eq(coll1.find().itcount(), 0);
-coll.drop();
-
-// Make sure we fail if index out of range of nsInfo
-assert.commandFailedWithCode(db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: 2, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
-}),
- [ErrorCodes.BadValue]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-// Missing ops
-assert.commandFailedWithCode(db.adminCommand({bulkWrite: 1, nsInfo: [{ns: "mydb.coll"}]}), [40414]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-// Missing nsInfo
-assert.commandFailedWithCode(
- db.adminCommand({bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}]}), [40414]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-// Test valid arguments with invalid values
-assert.commandFailedWithCode(db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: "test", document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}]
-}),
- [ErrorCodes.TypeMismatch]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
+});
-assert.commandFailedWithCode(
- db.adminCommand(
- {bulkWrite: 1, ops: [{insert: 0, document: "test"}], nsInfo: [{ns: "test.coll"}]}),
- [ErrorCodes.TypeMismatch]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-assert.commandFailedWithCode(
- db.adminCommand(
- {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: ["test"]}),
- [ErrorCodes.TypeMismatch]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-assert.commandFailedWithCode(
- db.adminCommand({bulkWrite: 1, ops: "test", nsInfo: [{ns: "test.coll"}]}),
- [ErrorCodes.TypeMismatch]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-assert.commandFailedWithCode(
- db.adminCommand(
- {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: "test"}),
- [ErrorCodes.TypeMismatch]);
-
-assert.eq(coll.find().itcount(), 0);
-assert.eq(coll1.find().itcount(), 0);
-
-// Test 2 inserts into the same namespace
-assert.commandWorked(db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: 0, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}]
-}));
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
assert.eq(coll.find().itcount(), 2);
assert.eq(coll1.find().itcount(), 0);
coll.drop();
-// Test that a write can fail part way through a write and the write partially executes.
-assert.commandWorked(db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 1, document: {skey: "MongoDB"}}
- ],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
-}));
-
-assert.eq(coll.find().itcount(), 1);
-assert.eq(coll1.find().itcount(), 0);
-coll.drop();
-coll1.drop();
-
-assert.commandWorked(db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 1, document: {skey: "MongoDB"}}
- ],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}],
- ordered: false
-}));
-
-assert.eq(coll.find().itcount(), 1);
-assert.eq(coll1.find().itcount(), 1);
-coll.drop();
-coll1.drop();
-
// Test BypassDocumentValidator
assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(db.runCommand({collMod: "coll", validator: {a: {$exists: true}}}));
-assert.commandWorked(db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}],
- bypassDocumentValidation: false,
-}));
-
-assert.eq(0, coll.count({_id: 3}));
-
-assert.commandWorked(db.adminCommand({
+res = db.adminCommand({
bulkWrite: 1,
ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}],
nsInfo: [{ns: "test.coll"}],
bypassDocumentValidation: true,
-}));
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
assert.eq(1, coll.count({_id: 3}));
diff --git a/jstests/core/write/bulk/bulk_write_delete_cursor.js b/jstests/core/write/bulk/bulk_write_delete_cursor.js
index 00e829d5a79e5..109e7bb7705de 100644
--- a/jstests/core/write/bulk/bulk_write_delete_cursor.js
+++ b/jstests/core/write/bulk/bulk_write_delete_cursor.js
@@ -5,35 +5,21 @@
* @tags: [
* assumes_against_mongod_not_mongos,
* not_allowed_with_security_token,
- * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes.
- * requires_non_retryable_writes,
- * # Command is not yet compatible with tenant migration.
- * tenant_migration_incompatible,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
* ]
*/
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
(function() {
"use strict";
-load("jstests/libs/feature_flag_util.js");
-
-// Skip this test if the BulkWriteCommand feature flag is not enabled.
-// TODO SERVER-67711: Remove feature flag check.
-if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) {
- jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.');
- return;
-}
var coll = db.getCollection("coll");
var coll1 = db.getCollection("coll1");
coll.drop();
coll1.drop();
-const cursorEntryValidator = function(entry, expectedEntry) {
- assert(entry.ok == expectedEntry.ok);
- assert(entry.idx == expectedEntry.idx);
- assert(entry.n == expectedEntry.n);
- assert(entry.code == expectedEntry.code);
-};
-
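// The inline validator deleted above is now loaded from jstests/libs/bulk_write_utils.js. The
// library's contents are not shown in this patch; a minimal sketch consistent with the removed
// definition (the shared helper presumably also covers fields such as nModified, which other
// tests in this patch pass) would be:
const cursorEntryValidatorSketch = function(entry, expectedEntry) {
    assert.eq(entry.ok, expectedEntry.ok);
    assert.eq(entry.idx, expectedEntry.idx);
    assert.eq(entry.n, expectedEntry.n);
    assert.eq(entry.code, expectedEntry.code);
};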
// Test generic delete with no return.
var res = db.adminCommand({
bulkWrite: 1,
@@ -45,6 +31,7 @@ var res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
@@ -66,6 +53,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
@@ -88,6 +76,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
@@ -110,6 +99,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
@@ -120,27 +110,6 @@ assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB"}]);
coll.drop();
-// Test deletes multiple when multi is true.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 0, skey: "MongoDB"}},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {delete: 0, filter: {skey: "MongoDB"}, multi: true},
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2});
-assert(!res.cursor.firstBatch[3]);
-assert(!coll.findOne());
-
-coll.drop();
-
// Test Insert outside of bulkWrite + delete in bulkWrite.
coll.insert({_id: 1, skey: "MongoDB"});
@@ -153,6 +122,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB"});
@@ -174,6 +144,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 0});
assert(!res.cursor.firstBatch[0].value);
@@ -184,59 +155,6 @@ assert.eq("MongoDB", coll1.findOne().skey);
coll.drop();
coll1.drop();
-// Make sure multi:true + return fails the op.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {delete: 0, filter: {_id: 1}, multi: true, return: true},
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidOptions});
-assert(!res.cursor.firstBatch[1]);
-
-// Test returnFields with return.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 0, skey: "MongoDB"}},
- {delete: 0, filter: {_id: 0}, returnFields: {_id: 0, skey: 1}, return: true},
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB"});
-assert(!res.cursor.firstBatch[2]);
-
-assert(!coll.findOne());
-
-coll.drop();
-
-// Test providing returnFields without return option.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 0, skey: "MongoDB"}},
- {delete: 0, filter: {_id: 0}, returnFields: {_id: 1}},
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.InvalidOptions});
-assert(!res.cursor.firstBatch[2]);
-
-coll.drop();
-
// Test let matches specific document.
res = db.adminCommand({
bulkWrite: 1,
@@ -244,17 +162,14 @@ res = db.adminCommand({
{insert: 0, document: {_id: 0, skey: "MongoDB"}},
{insert: 0, document: {_id: 1, skey: "MongoDB2"}},
{insert: 0, document: {_id: 2, skey: "MongoDB3"}},
- {
- delete: 0,
- filter: {$expr: {$eq: ["$skey", "$$targetKey"]}},
- let : {targetKey: "MongoDB"},
- return: true
- },
+ {delete: 0, filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, return: true},
],
- nsInfo: [{ns: "test.coll"}]
+ nsInfo: [{ns: "test.coll"}],
+ let : {targetKey: "MongoDB"}
});
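// ($$targetKey in the delete op's filter resolves from the command-level 'let' above.)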
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
@@ -265,69 +180,5 @@ assert(!res.cursor.firstBatch[4]);
assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB2"}, {_id: 2, skey: "MongoDB3"}]);
-coll.drop();
-
-// Test write fails userAllowedWriteNS.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {
- delete: 0,
- filter: {_id: 1},
- multi: true,
- },
- ],
- nsInfo: [{ns: "test.system.profile"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace});
-assert(!res.cursor.firstBatch[1]);
-
-// Test delete continues on error with ordered:false.
-coll.insert({_id: 1, skey: "MongoDB"});
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {
- delete: 0,
- filter: {_id: 0},
- },
- {delete: 1, filter: {_id: 1}, return: true}
- ],
- nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}],
- ordered: false
-});
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"});
-assert(!res.cursor.firstBatch[2]);
-
-assert(!coll.findOne());
-
-coll.drop();
-
-// Test delete stop on error with ordered:true.
-coll.insert({_id: 1, skey: "MongoDB"});
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {
- delete: 0,
- filter: {_id: 0},
- },
- {delete: 1, filter: {_id: 1}, return: true},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- ],
- nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}],
-});
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace});
-assert(!res.cursor.firstBatch[1]);
-
-assert.eq(coll.findOne().skey, "MongoDB");
-
coll.drop();
})();
diff --git a/jstests/core/write/bulk/bulk_write_getMore.js b/jstests/core/write/bulk/bulk_write_getMore.js
new file mode 100644
index 0000000000000..28d4343077361
--- /dev/null
+++ b/jstests/core/write/bulk/bulk_write_getMore.js
@@ -0,0 +1,62 @@
+/**
+ * Tests bulk write command in conjunction with using getMore to obtain the rest
+ * of the cursor response.
+ *
+ * These tests are incompatible with various overrides due to using getMore.
+ *
+ * The test runs commands that are not allowed with security token: bulkWrite.
+ * @tags: [
+ * assumes_against_mongod_not_mongos,
+ * not_allowed_with_security_token,
+ * command_not_supported_in_serverless,
+ * does_not_support_retryable_writes,
+ * requires_non_retryable_writes,
+ * requires_getmore,
+ * # Contains commands that fail which will fail the entire transaction
+ * does_not_support_transactions,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
+ * ]
+ */
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
+(function() {
+"use strict";
+
+var coll = db.getCollection("coll");
+var coll1 = db.getCollection("coll1");
+coll.drop();
+coll1.drop();
+
+// The retryable write override does not append txnNumber to getMore since it is not a retryable
+// command.
+
+// Test getMore by setting batch size to 1 and running 2 inserts.
+// Should end up with 1 insert return per batch.
+var res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
+ nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}],
+ cursor: {batchSize: 1},
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+assert(res.cursor.id != 0);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
+assert(!res.cursor.firstBatch[1]);
+
+// First batch only had 1 of 2 responses so run a getMore to get the next batch.
+var getMoreRes =
+ assert.commandWorked(db.adminCommand({getMore: res.cursor.id, collection: "$cmd.bulkWrite"}));
+
+assert(getMoreRes.cursor.id == 0);
+cursorEntryValidator(getMoreRes.cursor.nextBatch[0], {ok: 1, n: 1, idx: 1});
+assert(!getMoreRes.cursor.nextBatch[1]);
+
+assert.eq(coll.find().itcount(), 1);
+assert.eq(coll1.find().itcount(), 1);
+coll.drop();
+coll1.drop();
+})();
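// A hedged sketch generalizing the single getMore above into a full drain of a bulkWrite reply
// cursor. The helper name is hypothetical; it reuses the "$cmd.bulkWrite" virtual collection the
// test's getMore targets.
function drainBulkWriteCursor(db, res) {
    let replies = res.cursor.firstBatch.slice();
    let cursorId = res.cursor.id;
    while (cursorId != 0) {
        const next = assert.commandWorked(
            db.adminCommand({getMore: cursorId, collection: "$cmd.bulkWrite"}));
        replies = replies.concat(next.cursor.nextBatch);
        cursorId = next.cursor.id;
    }
    return replies;
}
// Hypothetical usage: drainBulkWriteCursor(db, res) should return one reply entry per op.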
diff --git a/jstests/core/write/bulk/bulk_write_insert_cursor.js b/jstests/core/write/bulk/bulk_write_insert_cursor.js
index fcce9584d2ecf..674346a2038b6 100644
--- a/jstests/core/write/bulk/bulk_write_insert_cursor.js
+++ b/jstests/core/write/bulk/bulk_write_insert_cursor.js
@@ -5,40 +5,27 @@
* @tags: [
* assumes_against_mongod_not_mongos,
* not_allowed_with_security_token,
- * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes.
- * requires_non_retryable_writes,
- * # Command is not yet compatible with tenant migration.
- * tenant_migration_incompatible,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
* ]
*/
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
(function() {
"use strict";
-load("jstests/libs/feature_flag_util.js");
-
-// Skip this test if the BulkWriteCommand feature flag is not enabled.
-// TODO SERVER-67711: Remove feature flag check.
-if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) {
- jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.');
- return;
-}
var coll = db.getCollection("coll");
var coll1 = db.getCollection("coll1");
coll.drop();
coll1.drop();
-const cursorEntryValidator = function(entry, expectedEntry) {
- assert(entry.ok == expectedEntry.ok);
- assert(entry.idx == expectedEntry.idx);
- assert(entry.n == expectedEntry.n);
- assert(entry.code == expectedEntry.code);
-};
-
// Make sure a properly formed request has successful result.
var res = db.adminCommand(
{bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}]});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
assert(res.cursor.id == 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
@@ -49,34 +36,6 @@ assert.eq(coll1.find().itcount(), 0);
coll.drop();
-// Test getMore by setting batch size to 1 and running 2 inserts.
-// Should end up with 1 insert return per batch.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}],
- cursor: {batchSize: 1},
-});
-
-assert.commandWorked(res);
-
-assert(res.cursor.id != 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-assert(!res.cursor.firstBatch[1]);
-
-// First batch only had 1 of 2 responses so run a getMore to get the next batch.
-var getMoreRes =
- assert.commandWorked(db.adminCommand({getMore: res.cursor.id, collection: "$cmd.bulkWrite"}));
-
-assert(getMoreRes.cursor.id == 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-assert(!getMoreRes.cursor.nextBatch[1]);
-
-assert.eq(coll.find().itcount(), 1);
-assert.eq(coll1.find().itcount(), 1);
-coll.drop();
-coll1.drop();
-
// Test internal batch size > 1.
res = db.adminCommand({
bulkWrite: 1,
@@ -85,6 +44,7 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
assert(res.cursor.id == 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
@@ -93,127 +53,5 @@ assert(!res.cursor.firstBatch[2]);
assert.eq(coll.find().itcount(), 2);
assert.eq(coll1.find().itcount(), 0);
-coll.drop();
-
-// Test that a write can fail part way through a write and the write partially executes.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 1, document: {skey: "MongoDB"}}
- ],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
-});
-
-assert.commandWorked(res);
-
-assert(res.cursor.id == 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 11000});
-// Make sure that error extra info was correctly added
-assert.docEq(res.cursor.firstBatch[1].keyPattern, {_id: 1});
-assert.docEq(res.cursor.firstBatch[1].keyValue, {_id: 1});
-assert(!res.cursor.firstBatch[2]);
-
-assert.eq(coll.find().itcount(), 1);
-assert.eq(coll1.find().itcount(), 0);
-coll.drop();
-coll1.drop();
-
-// Test that we continue processing after an error for ordered:false.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 1, document: {skey: "MongoDB"}}
- ],
- nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}],
- ordered: false
-});
-
-assert.commandWorked(res);
-
-assert(res.cursor.id == 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 11000});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2});
-assert(!res.cursor.firstBatch[3]);
-
-assert.eq(coll.find().itcount(), 1);
-assert.eq(coll1.find().itcount(), 1);
-coll.drop();
-coll1.drop();
-
-// Test fixDocumentForInsert works properly by erroring out on >16MB size insert.
-var targetSize = (16 * 1024 * 1024) + 1;
-var doc = {_id: new ObjectId(), value: ''};
-
-var size = Object.bsonsize(doc);
-assert.gte(targetSize, size);
-
-// Set 'value' as a string with enough characters to make the whole document 'targetSize'
-// bytes long.
-doc.value = new Array(targetSize - size + 1).join('x');
-assert.eq(targetSize, Object.bsonsize(doc));
-
-// Testing ordered:false continues on with other ops when fixDocumentForInsert fails.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: doc},
- {insert: 0, document: {_id: 2, skey: "MongoDB2"}},
- ],
- nsInfo: [{ns: "test.coll"}],
- ordered: false
-});
-
-assert.commandWorked(res);
-
-assert(res.cursor.id == 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-
-// In most cases we expect this to fail because it tries to insert a document that is too large.
-// In some cases we may see the javascript execution interrupted because it takes longer than
-// our default time limit, so we allow that possibility.
-try {
- cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.BadValue});
-} catch {
- cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.Interrupted});
-}
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2});
-assert(!res.cursor.firstBatch[3]);
-
-coll.drop();
-
-// Testing ordered:true short circuits.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {insert: 0, document: doc},
- {insert: 0, document: {_id: 2, skey: "MongoDB2"}},
- ],
- nsInfo: [{ns: "test.coll"}],
- ordered: true
-});
-
-assert.commandWorked(res);
-
-assert(res.cursor.id == 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
-
-// In most cases we expect this to fail because it tries to insert a document that is too large.
-// In some cases we may see the javascript execution interrupted because it takes longer than
-// our default time limit, so we allow that possibility.
-try {
- cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.BadValue});
-} catch {
- cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.Interrupted});
-}
-assert(!res.cursor.firstBatch[2]);
-
coll.drop();
})();
diff --git a/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js b/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js
new file mode 100644
index 0000000000000..40ec1462f8ff0
--- /dev/null
+++ b/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js
@@ -0,0 +1,175 @@
+/**
+ * Tests bulk write cursor response for correct responses.
+ *
+ * This file contains tests that are not compatible with retryable writes for various reasons.
+ *
+ * The test runs commands that are not allowed with security token: bulkWrite.
+ * @tags: [
+ * assumes_against_mongod_not_mongos,
+ * does_not_support_retryable_writes,
+ * requires_non_retryable_writes,
+ * not_allowed_with_security_token,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
+ * ]
+ */
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
+(function() {
+"use strict";
+
+var coll = db.getCollection("coll");
+var coll1 = db.getCollection("coll1");
+coll.drop();
+coll1.drop();
+
+// TODO SERVER-31242 findAndModify retry doesn't apply 'fields' to response.
+// This causes _id to not get projected out and the assert fails.
+// These tests should be moved back to `bulk_write_update_cursor.js` and
+// `bulk_write_delete_cursor.js` if the above ticket is completed.
+
+// Test returnFields with return.
+var res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {delete: 0, filter: {_id: 0}, returnFields: {_id: 0, skey: 1}, return: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB"});
+assert(!res.cursor.firstBatch[2]);
+
+assert(!coll.findOne());
+
+coll.drop();
+
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {
+ update: 0,
+ filter: {_id: 0},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ returnFields: {_id: 0, skey: 1},
+ return: "post"
+ },
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
+assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB2"});
+assert(!res.cursor.firstBatch[2]);
+
+assert.eq("MongoDB2", coll.findOne().skey);
+
+coll.drop();
+
+// Multi:true is not supported for retryable writes.
+
+// Test updates multiple when multi is true.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {update: 0, filter: {skey: "MongoDB"}, updateMods: {$set: {skey: "MongoDB2"}}, multi: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2, nModified: 2});
+assert(!res.cursor.firstBatch[2].value);
+assert(!res.cursor.firstBatch[3]);
+assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}]);
+
+coll.drop();
+
+// Test deletes multiple when multi is true.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {delete: 0, filter: {skey: "MongoDB"}, multi: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2});
+assert(!res.cursor.firstBatch[3]);
+assert(!coll.findOne());
+
+coll.drop();
+
+// Test let for multiple updates and a delete, with constants shadowing in one update.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB2"}},
+ {
+ update: 0,
+ filter: {$expr: {$eq: ["$skey", "$$targetKey1"]}},
+ updateMods: [{$set: {skey: "$$replacedKey1"}}],
+ constants: {replacedKey1: "MongoDB4"},
+ return: "post"
+ },
+ {
+ update: 0,
+ filter: {$expr: {$eq: ["$skey", "$$targetKey2"]}},
+ updateMods: [{$set: {skey: "MongoDB"}}],
+ return: "post"
+ },
+ {delete: 0, filter: {$expr: {$eq: ["$skey", "$$replacedKey2"]}}, return: true}
+ ],
+ nsInfo: [{ns: "test.coll"}],
+ let : {
+ targetKey1: "MongoDB",
+ targetKey2: "MongoDB2",
+ replacedKey1: "MongoDB",
+ replacedKey2: "MongoDB4"
+ }
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
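// How the variables resolve (confirmed by the asserts below): the update at idx 2 supplies
// constants {replacedKey1: "MongoDB4"}, shadowing the command-level let value "MongoDB", so the
// first document's skey becomes "MongoDB4"; the update at idx 3 sees only the command-level let
// and sets the second document's skey back to "MongoDB"; the delete at idx 4 then matches via
// replacedKey2: "MongoDB4" and removes the first document, returning it.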
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[4], {ok: 1, idx: 4, n: 1});
+assert(!res.cursor.firstBatch[5]);
+
+assert.docEq(res.cursor.firstBatch[2].value, {_id: 0, skey: "MongoDB4"});
+assert.docEq(res.cursor.firstBatch[3].value, {_id: 1, skey: "MongoDB"});
+assert.docEq(res.cursor.firstBatch[4].value, {_id: 0, skey: "MongoDB4"});
+
+assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB"}]);
+
+coll.drop();
+})();
diff --git a/jstests/core/write/bulk/bulk_write_non_transaction.js b/jstests/core/write/bulk/bulk_write_non_transaction.js
new file mode 100644
index 0000000000000..185a951102bdf
--- /dev/null
+++ b/jstests/core/write/bulk/bulk_write_non_transaction.js
@@ -0,0 +1,675 @@
+/**
+ * Tests bulk write command for scenarios that cause the command to fail (ok: 0).
+ *
+ * These tests are incompatible with the transaction overrides since any failure
+ * will cause the transaction to abort, which makes the overrides loop infinitely.
+ *
+ * The test runs commands that are not allowed with security token: bulkWrite.
+ * @tags: [
+ * assumes_against_mongod_not_mongos,
+ * not_allowed_with_security_token,
+ * command_not_supported_in_serverless,
+ * # Contains commands that fail which will fail the entire transaction
+ * does_not_support_transactions,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
+ * ]
+ */
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
+(function() {
+"use strict";
+
+var coll = db.getCollection("coll");
+var coll1 = db.getCollection("coll1");
+coll.drop();
+coll1.drop();
+
+var maxWriteBatchSize = db.hello().maxWriteBatchSize;
+var insertOp = {insert: 0, document: {_id: 1, skey: "MongoDB"}};
+
+// Make sure bulkWrite at maxWriteBatchSize is okay
+let ops = [];
+for (var i = 0; i < maxWriteBatchSize; ++i) {
+ ops.push(insertOp);
+}
+
+var res = db.adminCommand({
+ bulkWrite: 1,
+ ops: ops,
+ nsInfo: [{ns: "test.coll"}],
+});
+
+// It is also possible to see interruption here due to very large batch size.
+if (!ErrorCodes.isInterruption(res.code)) {
+ assert.commandWorked(res);
+}
+coll.drop();
+
+// Make sure bulkWrite above maxWriteBatchSize fails
+ops = [];
+for (var i = 0; i < maxWriteBatchSize + 1; ++i) {
+ ops.push(insertOp);
+}
+
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: ops,
+ nsInfo: [{ns: "test.coll"}],
+});
+
+// It is also possible to see interruption here due to very large batch size.
+if (!ErrorCodes.isInterruption(res.code)) {
+ assert.commandFailedWithCode(res, [ErrorCodes.InvalidLength]);
+}
+
+// Make sure invalid fields are not accepted
+assert.commandFailedWithCode(db.adminCommand({
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {skey: "MongoDB"}}],
+ nsInfo: [{ns: "test.coll"}],
+ cursor: {batchSize: 1024},
+ bypassDocumentValidation: true,
+ ordered: false,
+ fooField: 0
+}),
+ [40415]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+// Make sure we fail if index out of range of nsInfo
+assert.commandFailedWithCode(db.adminCommand({
+ bulkWrite: 1,
+ ops: [{insert: 2, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}],
+ nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
+}),
+ [ErrorCodes.BadValue]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+// Missing ops
+assert.commandFailedWithCode(db.adminCommand({bulkWrite: 1, nsInfo: [{ns: "mydb.coll"}]}), [40414]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+// Missing nsInfo
+assert.commandFailedWithCode(
+ db.adminCommand({bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}]}), [40414]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+// Test valid arguments with invalid values
+assert.commandFailedWithCode(db.adminCommand({
+ bulkWrite: 1,
+ ops: [{insert: "test", document: {skey: "MongoDB"}}],
+ nsInfo: [{ns: "test.coll"}]
+}),
+ [ErrorCodes.TypeMismatch]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {bulkWrite: 1, ops: [{insert: 0, document: "test"}], nsInfo: [{ns: "test.coll"}]}),
+ [ErrorCodes.TypeMismatch]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: ["test"]}),
+ [ErrorCodes.TypeMismatch]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+assert.commandFailedWithCode(
+ db.adminCommand({bulkWrite: 1, ops: "test", nsInfo: [{ns: "test.coll"}]}),
+ [ErrorCodes.TypeMismatch]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+assert.commandFailedWithCode(
+ db.adminCommand(
+ {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: "test"}),
+ [ErrorCodes.TypeMismatch]);
+
+assert.eq(coll.find().itcount(), 0);
+assert.eq(coll1.find().itcount(), 0);
+
+// Make sure update multi:true + return fails the op.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ update: 0,
+ filter: {_id: 1},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ multi: true,
+ return: "post"
+ },
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.InvalidOptions});
+assert(!res.cursor.firstBatch[1]);
+
+// Test update providing returnFields without return option.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {
+ update: 0,
+ filter: {_id: 0},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ returnFields: {_id: 1}
+ },
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, idx: 1, n: 0, nModified: 0, code: ErrorCodes.InvalidOptions});
+assert(!res.cursor.firstBatch[2]);
+
+coll.drop();
+
+// Test update fails userAllowedWriteNS.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ update: 0,
+ filter: {_id: 1},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ },
+ ],
+ nsInfo: [{ns: "test.system.profile"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.InvalidNamespace});
+assert(!res.cursor.firstBatch[1]);
+
+var coll2 = db.getCollection("coll2");
+coll2.drop();
+
+// Test update continues on error with ordered:false.
+assert.commandWorked(coll2.createIndex({x: 1}, {unique: true}));
+assert.commandWorked(coll2.insert({x: 3}));
+assert.commandWorked(coll2.insert({x: 4}));
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true},
+ {
+ update: 1,
+ filter: {_id: 1},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ upsert: true,
+ return: "post"
+ },
+ ],
+ nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}],
+ ordered: false
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.DuplicateKey});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 0});
+
+assert.docEq(res.cursor.firstBatch[1].upserted, {index: 0, _id: 1});
+assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"});
+assert(!res.cursor.firstBatch[2]);
+coll.drop();
+coll2.drop();
+
+// Test update stop on error with ordered:true.
+assert.commandWorked(coll2.createIndex({x: 1}, {unique: true}));
+assert.commandWorked(coll2.insert({x: 3}));
+assert.commandWorked(coll2.insert({x: 4}));
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true},
+ {
+ update: 1,
+ filter: {_id: 1},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ upsert: true,
+ return: "post"
+ },
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ ],
+ nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}],
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.DuplicateKey});
+assert(!res.cursor.firstBatch[1]);
+coll.drop();
+coll2.drop();
+
+// Test fixDocumentForInsert works properly by erroring out on >16MB size insert.
+var targetSize = (16 * 1024 * 1024) + 1;
+var doc = {_id: new ObjectId(), value: ''};
+
+var size = Object.bsonsize(doc);
+assert.gte(targetSize, size);
+
+// Set 'value' as a string with enough characters to make the whole document 'targetSize'
+// bytes long.
+doc.value = new Array(targetSize - size + 1).join('x');
+assert.eq(targetSize, Object.bsonsize(doc));
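// (new Array(k + 1).join('x') produces exactly k characters, so the document grows by precisely
// targetSize - size bytes and the bsonsize assertion above holds.)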
+
+// Testing ordered:false continues on with other ops when fixDocumentForInsert fails.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: doc},
+ {insert: 0, document: {_id: 2, skey: "MongoDB2"}},
+ ],
+ nsInfo: [{ns: "test.coll"}],
+ ordered: false
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+assert.eq(res.cursor.id, 0);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
+
+// In most cases we expect this to fail because it tries to insert a document that is too large.
+// In some cases we may see the javascript execution interrupted because it takes longer than
+// our default time limit, so we allow that possibility.
+try {
+ cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, n: 0, idx: 1, code: ErrorCodes.BadValue});
+} catch {
+ cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, n: 0, idx: 1, code: ErrorCodes.Interrupted});
+}
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2});
+assert(!res.cursor.firstBatch[3]);
+
+coll.drop();
+
+// Testing ordered:true short circuits.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: doc},
+ {insert: 0, document: {_id: 2, skey: "MongoDB2"}},
+ ],
+ nsInfo: [{ns: "test.coll"}],
+ ordered: true
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+assert.eq(res.cursor.id, 0);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
+
+// In most cases we expect this to fail because it tries to insert a document that is too large.
+// In some cases the JavaScript execution may be interrupted because it takes longer than our
+// default time limit, so we allow that possibility.
+try {
+ cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, n: 0, idx: 1, code: ErrorCodes.BadValue});
+} catch {
+ cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, n: 0, idx: 1, code: ErrorCodes.Interrupted});
+}
+assert(!res.cursor.firstBatch[2]);
+
+coll.drop();
+
+// Test that a bulkWrite can fail partway through and the write partially executes.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 1, document: {skey: "MongoDB"}}
+ ],
+ nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+assert.eq(res.cursor.id, 0);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, n: 0, idx: 1, code: 11000});
+// Make sure that the error's extra info was correctly added.
+assert.docEq(res.cursor.firstBatch[1].keyPattern, {_id: 1});
+assert.docEq(res.cursor.firstBatch[1].keyValue, {_id: 1});
+assert(!res.cursor.firstBatch[2]);
+
+assert.eq(coll.find().itcount(), 1);
+assert.eq(coll1.find().itcount(), 0);
+coll.drop();
+coll1.drop();
+
+// Test that we continue processing after an error for ordered:false.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 1, document: {skey: "MongoDB"}}
+ ],
+ nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}],
+ ordered: false
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+assert.eq(res.cursor.id, 0);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, n: 0, idx: 1, code: 11000});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2});
+assert(!res.cursor.firstBatch[3]);
+
+assert.eq(coll.find().itcount(), 1);
+assert.eq(coll1.find().itcount(), 1);
+coll.drop();
+coll1.drop();
+
+// Make sure delete multi:true + return fails the op.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {delete: 0, filter: {_id: 1}, multi: true, return: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, n: 0, idx: 0, code: ErrorCodes.InvalidOptions});
+assert(!res.cursor.firstBatch[1]);
+
+// Test delete providing returnFields without the return option.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {delete: 0, filter: {_id: 0}, returnFields: {_id: 1}},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1],
+ {ok: 0, n: 0, idx: 1, code: ErrorCodes.InvalidOptions});
+assert(!res.cursor.firstBatch[2]);
+
+coll.drop();
+
+// Test that delete fails the userAllowedWriteNS check.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ delete: 0,
+ filter: {_id: 1},
+ },
+ ],
+ nsInfo: [{ns: "test.system.profile"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace});
+assert(!res.cursor.firstBatch[1]);
+
+// Test delete continues on error with ordered:false.
+coll.insert({_id: 1, skey: "MongoDB"});
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ delete: 0,
+ filter: {_id: 0},
+ },
+ {delete: 1, filter: {_id: 1}, return: true}
+ ],
+ nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}],
+ ordered: false
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"});
+assert(!res.cursor.firstBatch[2]);
+
+assert(!coll.findOne());
+
+coll.drop();
+
+// Test that delete stops on error with ordered:true.
+coll.insert({_id: 1, skey: "MongoDB"});
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ delete: 0,
+ filter: {_id: 0},
+ },
+ {delete: 1, filter: {_id: 1}, return: true},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ ],
+ nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}],
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0],
+ {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace});
+assert(!res.cursor.firstBatch[1]);
+
+assert.eq(coll.findOne().skey, "MongoDB");
+
+coll.drop();
+
+// Test running multiple findAndModify ops in a single command.
+// For normal commands this should succeed; for retryable writes the top level should fail.
+
+// We want to make sure both update and delete handle this correctly, so we test the following
+// combinations of ops: update + delete and delete + update. This proves both op types set and
+// check the flag correctly, which makes update + update and delete + delete redundant.
+
+// update + delete
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
+ {delete: 0, filter: {_id: 1}, return: true},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
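+// When this runs as a retryable write the whole command fails, so skip validating the cursor
+// entries in that case.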
+let processCursor = true;
+try {
+ assert.commandWorked(res);
+ assert.eq(res.numErrors, 0);
+} catch {
+ processCursor = false;
+ assert.commandFailedWithCode(res, [ErrorCodes.BadValue]);
+ assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write");
+}
+
+if (processCursor) {
+ cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
+ assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"});
+ cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+ assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB2"});
+ assert(!res.cursor.firstBatch[3]);
+}
+
+coll.drop();
+
+// delete + update
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 2, skey: "MongoDB"}},
+ {delete: 0, filter: {_id: 2}, return: true},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+processCursor = true;
+try {
+ assert.commandWorked(res);
+ assert.eq(res.numErrors, 0);
+} catch {
+ processCursor = false;
+ assert.commandFailedWithCode(res, [ErrorCodes.BadValue]);
+ assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write");
+}
+
+if (processCursor) {
+ cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+ cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+ assert.docEq(res.cursor.firstBatch[2].value, {_id: 2, skey: "MongoDB"});
+ cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1});
+ assert.docEq(res.cursor.firstBatch[3].value, {_id: 1, skey: "MongoDB"});
+ assert(!res.cursor.firstBatch[4]);
+}
+
+coll.drop();
+
+// Test bypassDocumentValidation.
+assert.commandWorked(coll.insert({_id: 1}));
+assert.commandWorked(db.runCommand({collMod: "coll", validator: {a: {$exists: true}}}));
+
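+// With bypassDocumentValidation:false, the insert should be rejected by the {a: {$exists: true}}
+// validator.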
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}],
+ nsInfo: [{ns: "test.coll"}],
+ bypassDocumentValidation: false,
+});
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+assert.eq(0, coll.count({_id: 3}));
+coll.drop();
+
+// Test that we correctly count multiple errors for different write types when ordered=false.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1}},
+ {insert: 0, document: {_id: 2}},
+ // error 1: duplicate key error
+ {insert: 0, document: {_id: 1}},
+ {delete: 0, filter: {_id: 2}},
+ // error 2: user can't write to namespace
+ {delete: 1, filter: {_id: 0}},
+ {update: 0, filter: {_id: 0}, updateMods: {$set: {x: 1}}},
+ // error 3: invalid update operator
+ {update: 0, filter: {_id: 0}, updateMods: {$blah: {x: 1}}},
+ ],
+ nsInfo: [{ns: "test.coll"}, {ns: "test.system.profile"}],
+ ordered: false
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 3);
+coll.drop();
+
+// Check n and nModified on update success and failure.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {_id: 2}}},
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[2],
+ {ok: 0, idx: 2, n: 0, nModified: 0, code: ErrorCodes.ImmutableField});
+assert(!res.cursor.firstBatch[3]);
+coll.drop();
+
+// Test that constants are not supported with a non-pipeline update.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {
+ update: 0,
+ filter: {$expr: {$eq: ["$skey", "MongoDB"]}},
+ updateMods: {skey: "$$targetKey"},
+ constants: {targetKey: "MongoDB2"},
+ return: "post"
+ },
+ ],
+ nsInfo: [{ns: "test.coll"}],
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 1);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, n: 0, nModified: 0, code: 51198});
+assert.eq(res.cursor.firstBatch[0].errmsg,
+ "Constant values may only be specified for pipeline updates");
+assert(!res.cursor.firstBatch[1]);
+
+coll.drop();
+})();
diff --git a/jstests/core/write/bulk/bulk_write_update_cursor.js b/jstests/core/write/bulk/bulk_write_update_cursor.js
index 140ebe27e6b93..1b2a75e6bd25a 100644
--- a/jstests/core/write/bulk/bulk_write_update_cursor.js
+++ b/jstests/core/write/bulk/bulk_write_update_cursor.js
@@ -5,36 +5,21 @@
* @tags: [
* assumes_against_mongod_not_mongos,
* not_allowed_with_security_token,
- * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes.
- * requires_non_retryable_writes,
- * # Command is not yet compatible with tenant migration.
- * tenant_migration_incompatible,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
* ]
*/
+load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator.
+
(function() {
"use strict";
-load("jstests/libs/feature_flag_util.js");
-
-// Skip this test if the BulkWriteCommand feature flag is not enabled.
-// TODO SERVER-67711: Remove feature flag check.
-if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) {
- jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.');
- return;
-}
var coll = db.getCollection("coll");
var coll1 = db.getCollection("coll1");
coll.drop();
coll1.drop();
-const cursorEntryValidator = function(entry, expectedEntry) {
- assert(entry.ok == expectedEntry.ok);
- assert(entry.idx == expectedEntry.idx);
- assert(entry.n == expectedEntry.n);
- assert(entry.nModified == expectedEntry.nModified);
- assert(entry.code == expectedEntry.code);
-};
-
// Test generic update with no return.
var res = db.adminCommand({
bulkWrite: 1,
@@ -46,9 +31,10 @@ var res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
assert(!res.cursor.firstBatch[1].value);
assert(!res.cursor.firstBatch[2]);
@@ -68,9 +54,10 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"});
assert(!res.cursor.firstBatch[2]);
@@ -90,9 +77,10 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[2]);
@@ -118,10 +106,11 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[3]);
assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB"}, {_id: 1, skey: "MongoDB2"}]);
@@ -146,35 +135,68 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[2].value, {_id: 0, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[3]);
assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB"}]);
coll.drop();
-// Test updates multiple when multi is true.
+// Test update with a descending sort and no return.
+res = db.adminCommand({
+ bulkWrite: 1,
+ ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {
+ update: 0,
+ filter: {skey: "MongoDB"},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ sort: {_id: -1}
+ },
+ ],
+ nsInfo: [{ns: "test.coll"}]
+});
+
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
+assert(!res.cursor.firstBatch[3]);
+assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB"}, {_id: 1, skey: "MongoDB2"}]);
+
+coll.drop();
+
+// Test update with an ascending sort and no return.
res = db.adminCommand({
bulkWrite: 1,
ops: [
{insert: 0, document: {_id: 0, skey: "MongoDB"}},
{insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {update: 0, filter: {skey: "MongoDB"}, updateMods: {$set: {skey: "MongoDB2"}}, multi: true},
+ {
+ update: 0,
+ filter: {skey: "MongoDB"},
+ updateMods: {$set: {skey: "MongoDB2"}},
+ sort: {_id: 1}
+ },
],
nsInfo: [{ns: "test.coll"}]
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 2});
-assert(!res.cursor.firstBatch[2].value);
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
assert(!res.cursor.firstBatch[3]);
-assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}]);
+assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB"}]);
coll.drop();
@@ -190,8 +212,9 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[1]);
@@ -211,8 +234,9 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0});
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 0, nModified: 0});
assert(!res.cursor.firstBatch[0].value);
assert(!res.cursor.firstBatch[1]);
@@ -237,8 +261,9 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0});
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0});
assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1});
assert(!res.cursor.firstBatch[0].value);
assert(!res.cursor.firstBatch[1]);
@@ -263,8 +288,9 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0});
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0});
assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1});
assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[1]);
@@ -273,76 +299,6 @@ assert.eq("MongoDB2", coll.findOne().skey);
coll.drop();
-// Make sure multi:true + return fails the op.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {
- update: 0,
- filter: {_id: 1},
- updateMods: {$set: {skey: "MongoDB2"}},
- multi: true,
- return: "post"
- },
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidOptions});
-assert(!res.cursor.firstBatch[1]);
-
-// Test returnFields with return.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 0, skey: "MongoDB"}},
- {
- update: 0,
- filter: {_id: 0},
- updateMods: {$set: {skey: "MongoDB2"}},
- returnFields: {_id: 0, skey: 1},
- return: "post"
- },
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
-assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB2"});
-assert(!res.cursor.firstBatch[2]);
-
-assert.eq("MongoDB2", coll.findOne().skey);
-
-coll.drop();
-
-// Test providing returnFields without return option.
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {insert: 0, document: {_id: 0, skey: "MongoDB"}},
- {
- update: 0,
- filter: {_id: 0},
- updateMods: {$set: {skey: "MongoDB2"}},
- returnFields: {_id: 1}
- },
- ],
- nsInfo: [{ns: "test.coll"}]
-});
-
-assert.commandWorked(res);
-
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.InvalidOptions});
-assert(!res.cursor.firstBatch[2]);
-
-coll.drop();
-
// Test inc operator in updateMods.
res = db.adminCommand({
bulkWrite: 1,
@@ -354,9 +310,10 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[1].value, {_id: 0, a: 3});
assert.eq(res.cursor.firstBatch[1].nModified, 1);
assert(!res.cursor.firstBatch[2]);
@@ -381,8 +338,9 @@ res = db.adminCommand({
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1});
assert.eq(res.cursor.firstBatch[0].nModified, 1);
assert.docEq(res.cursor.firstBatch[0].value, {_id: 0, a: [{b: 6}, {b: 1}, {b: 2}]});
assert(!res.cursor.firstBatch[1]);
@@ -400,19 +358,20 @@ res = db.adminCommand({
update: 0,
filter: {$expr: {$eq: ["$skey", "$$targetKey"]}},
updateMods: {skey: "MongoDB2"},
- let : {targetKey: "MongoDB"},
return: "post"
},
],
- nsInfo: [{ns: "test.coll"}]
+ nsInfo: [{ns: "test.coll"}],
+ let : {targetKey: "MongoDB"}
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, nModified: 1});
+cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1});
assert.docEq(res.cursor.firstBatch[3].value, {_id: 0, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[4]);
@@ -422,124 +381,118 @@ assert.sameMembers(
coll.drop();
-// Test multiple updates on same namespace.
+// Test that constants work in a pipeline update.
res = db.adminCommand({
bulkWrite: 1,
ops: [
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
- {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "post"},
- {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB3"}}, return: "post"},
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB2"}},
+ {insert: 0, document: {_id: 2, skey: "MongoDB3"}},
+ {
+ update: 0,
+ filter: {$expr: {$eq: ["$skey", "$$targetKey"]}},
+ updateMods: [{$set: {skey: "$$replacedKey"}}],
+ constants: {targetKey: "MongoDB", replacedKey: "MongoDB2"},
+ return: "post"
+ },
],
- nsInfo: [{ns: "test.coll"}]
+ nsInfo: [{ns: "test.coll"}],
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1});
-assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"});
-cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1});
-assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB3"});
-assert(!res.cursor.firstBatch[3]);
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1});
+assert.docEq(res.cursor.firstBatch[3].value, {_id: 0, skey: "MongoDB2"});
+assert(!res.cursor.firstBatch[4]);
-assert.eq("MongoDB3", coll.findOne().skey);
+assert.sameMembers(
+ coll.find().toArray(),
+ [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}, {_id: 2, skey: "MongoDB3"}]);
coll.drop();
-// Test upsert with implicit collection creation.
+// Test that let matches a specific document (targetKey) and constants overwrite let (replacedKey).
res = db.adminCommand({
bulkWrite: 1,
ops: [
+ {insert: 0, document: {_id: 0, skey: "MongoDB"}},
+ {insert: 0, document: {_id: 1, skey: "MongoDB2"}},
+ {insert: 0, document: {_id: 2, skey: "MongoDB3"}},
{
update: 0,
- filter: {_id: 1},
- updateMods: {$set: {skey: "MongoDB2"}},
- upsert: true,
+ filter: {$expr: {$eq: ["$skey", "$$targetKey"]}},
+ updateMods: [{$set: {skey: "$$replacedKey"}}],
+ constants: {replacedKey: "MongoDB4"},
return: "post"
},
],
- nsInfo: [{ns: "test.coll2"}]
+ nsInfo: [{ns: "test.coll"}],
+ let : {targetKey: "MongoDB3", replacedKey: "MongoDB2"}
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0});
-assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1});
-assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"});
-assert(!res.cursor.firstBatch[1]);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1});
+assert.docEq(res.cursor.firstBatch[3].value, {_id: 2, skey: "MongoDB4"});
+assert(!res.cursor.firstBatch[4]);
-var coll2 = db.getCollection("coll2");
-coll2.drop();
+coll.drop();
-// Test write fails userAllowedWriteNS.
+// Test multiple updates on same namespace.
res = db.adminCommand({
bulkWrite: 1,
ops: [
- {
- update: 0,
- filter: {_id: 1},
- updateMods: {$set: {skey: "MongoDB2"}},
- multi: true,
- },
+ {insert: 0, document: {_id: 1, skey: "MongoDB"}},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "post"},
+ {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB3"}}},
],
- nsInfo: [{ns: "test.system.profile"}]
+ nsInfo: [{ns: "test.coll"}]
});
assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace});
-assert(!res.cursor.firstBatch[1]);
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1});
+cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1});
+assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"});
+cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1});
+assert(!res.cursor.firstBatch[3]);
-// Test update continues on error with ordered:false.
-assert.commandWorked(coll2.createIndex({x: 1}, {unique: true}));
-assert.commandWorked(coll2.insert({x: 3}));
-assert.commandWorked(coll2.insert({x: 4}));
-res = db.adminCommand({
- bulkWrite: 1,
- ops: [
- {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true, return: "post"},
- {
- update: 1,
- filter: {_id: 1},
- updateMods: {$set: {skey: "MongoDB2"}},
- upsert: true,
- return: "post"
- },
- ],
- nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}],
- ordered: false
-});
+assert.eq("MongoDB3", coll.findOne().skey);
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.DuplicateKey});
-cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 0});
-assert.docEq(res.cursor.firstBatch[1].upserted, {index: 0, _id: 1});
-assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"});
-assert(!res.cursor.firstBatch[2]);
coll.drop();
-coll2.drop();
-// Test update stop on error with ordered:true.
-assert.commandWorked(coll2.createIndex({x: 1}, {unique: true}));
-assert.commandWorked(coll2.insert({x: 3}));
-assert.commandWorked(coll2.insert({x: 4}));
+// Test upsert with implicit collection creation.
res = db.adminCommand({
bulkWrite: 1,
ops: [
- {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true, return: "post"},
{
- update: 1,
+ update: 0,
filter: {_id: 1},
updateMods: {$set: {skey: "MongoDB2"}},
upsert: true,
return: "post"
},
- {insert: 0, document: {_id: 1, skey: "MongoDB"}},
],
- nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}],
+ nsInfo: [{ns: "test.coll2"}]
});
-cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.DuplicateKey});
+assert.commandWorked(res);
+assert.eq(res.numErrors, 0);
+
+cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0});
+assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1});
+assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"});
assert(!res.cursor.firstBatch[1]);
-coll.drop();
+
+var coll2 = db.getCollection("coll2");
coll2.drop();
})();
diff --git a/jstests/core/write/collection_uuid_write_commands.js b/jstests/core/write/collection_uuid_write_commands.js
index ff9781ec09052..e12971e2c13e7 100644
--- a/jstests/core/write/collection_uuid_write_commands.js
+++ b/jstests/core/write/collection_uuid_write_commands.js
@@ -23,7 +23,7 @@ const validateErrorResponse = function(
};
var testCommand = function(cmd, cmdObj) {
- const testDB = db.getSiblingDB(jsTestName());
+ const testDB = db.getSiblingDB("coll_uuid_write_cmds");
assert.commandWorked(testDB.dropDatabase());
const coll = testDB['coll'];
assert.commandWorked(coll.insert({_id: 0}));
diff --git a/jstests/core/write/delete/delete_hint.js b/jstests/core/write/delete/delete_hint.js
index 6b944702fd97d..9d1e6eb88f536 100644
--- a/jstests/core/write/delete/delete_hint.js
+++ b/jstests/core/write/delete/delete_hint.js
@@ -10,10 +10,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
function assertCommandUsesIndex(command, expectedHintKeyPattern) {
const out = assert.commandWorked(coll.runCommand({explain: command}));
@@ -114,5 +111,4 @@ function failedHintTest() {
normalIndexTest();
sparseIndexTest();
shellHelpersTest();
-failedHintTest();
-})();
+failedHintTest();
\ No newline at end of file
diff --git a/jstests/core/write/delete/remove.js b/jstests/core/write/delete/remove.js
index f08792aeca594..e3ecd6343212d 100644
--- a/jstests/core/write/delete/remove.js
+++ b/jstests/core/write/delete/remove.js
@@ -3,11 +3,11 @@
// remove.js
// unit test for db remove
-t = db.removetest;
+let t = db.removetest;
function f(n, dir) {
t.createIndex({x: dir || 1});
- for (i = 0; i < n; i++)
+ for (let i = 0; i < n; i++)
t.save({x: 3, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
assert.eq(n, t.find().count());
diff --git a/jstests/core/write/delete/remove3.js b/jstests/core/write/delete/remove3.js
index 6f0a94589e94e..c2ce5ffead8b4 100644
--- a/jstests/core/write/delete/remove3.js
+++ b/jstests/core/write/delete/remove3.js
@@ -1,9 +1,9 @@
// @tags: [requires_non_retryable_writes, requires_fastcount]
-t = db.remove3;
+let t = db.remove3;
t.drop();
-for (i = 1; i <= 8; i++) {
+for (let i = 1; i <= 8; i++) {
t.save({_id: i, x: i});
}
diff --git a/jstests/core/write/delete/remove4.js b/jstests/core/write/delete/remove4.js
index 483de24bf7d77..43dcbc5807bfd 100644
--- a/jstests/core/write/delete/remove4.js
+++ b/jstests/core/write/delete/remove4.js
@@ -1,6 +1,6 @@
// @tags: [requires_non_retryable_writes]
-t = db.remove4;
+let t = db.remove4;
t.drop();
t.save({a: 1, b: 1});
diff --git a/jstests/core/write/delete/remove6.js b/jstests/core/write/delete/remove6.js
index f60200a8d8122..0546fa3f8b612 100644
--- a/jstests/core/write/delete/remove6.js
+++ b/jstests/core/write/delete/remove6.js
@@ -1,9 +1,9 @@
// @tags: [requires_non_retryable_writes, requires_fastcount]
-t = db.remove6;
+let t = db.remove6;
t.drop();
-N = 1000;
+let N = 1000;
function pop() {
t.drop();
diff --git a/jstests/core/write/delete/remove7.js b/jstests/core/write/delete/remove7.js
index 9cc8632999090..9924a34362ca8 100644
--- a/jstests/core/write/delete/remove7.js
+++ b/jstests/core/write/delete/remove7.js
@@ -1,6 +1,6 @@
// @tags: [requires_non_retryable_writes]
-t = db.remove7;
+let t = db.remove7;
t.drop();
function getTags(n) {
@@ -14,13 +14,13 @@ function getTags(n) {
return a;
}
-for (i = 0; i < 1000; i++) {
+for (let i = 0; i < 1000; i++) {
t.save({tags: getTags()});
}
t.createIndex({tags: 1});
-for (i = 0; i < 200; i++) {
+for (let i = 0; i < 200; i++) {
for (var j = 0; j < 10; j++)
t.save({tags: getTags(100)});
var q = {tags: {$in: getTags(10)}};
diff --git a/jstests/core/write/delete/remove8.js b/jstests/core/write/delete/remove8.js
index 7a8263c21e448..3f34a753e92f3 100644
--- a/jstests/core/write/delete/remove8.js
+++ b/jstests/core/write/delete/remove8.js
@@ -4,10 +4,10 @@
// requires_fastcount,
// ]
-t = db.remove8;
+let t = db.remove8;
t.drop();
-N = 1000;
+let N = 1000;
function fill() {
for (var i = 0; i < N; i++) {
diff --git a/jstests/core/write/delete/remove_justone.js b/jstests/core/write/delete/remove_justone.js
index f5345627e34cb..1ae630432db3d 100644
--- a/jstests/core/write/delete/remove_justone.js
+++ b/jstests/core/write/delete/remove_justone.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_non_retryable_writes, requires_fastcount]
-t = db.remove_justone;
+let t = db.remove_justone;
t.drop();
t.insert({x: 1});
diff --git a/jstests/core/write/delete/removec.js b/jstests/core/write/delete/removec.js
index 560f7405de3ee..bd68d43201c42 100644
--- a/jstests/core/write/delete/removec.js
+++ b/jstests/core/write/delete/removec.js
@@ -6,36 +6,36 @@
// Sanity test for removing documents with adjacent index keys. SERVER-2008
-t = db.jstests_removec;
+let t = db.jstests_removec;
t.drop();
t.createIndex({a: 1});
/** @return an array containing a sequence of numbers from i to i + 10. */
function runStartingWith(i) {
- ret = [];
- for (j = 0; j < 11; ++j) {
+ let ret = [];
+ for (let j = 0; j < 11; ++j) {
ret.push(i + j);
}
return ret;
}
// Insert some documents with adjacent index keys.
-for (i = 0; i < 1100; i += 11) {
+for (let i = 0; i < 1100; i += 11) {
t.save({a: runStartingWith(i)});
}
// Remove and then reinsert random documents in the background.
-s = startParallelShell('t = db.jstests_removec;' +
- 'Random.setRandomSeed();' +
- 'for( j = 0; j < 1000; ++j ) {' +
- ' o = t.findOne( { a:Random.randInt( 1100 ) } );' +
- ' t.remove( { _id:o._id } );' +
- ' t.insert( o );' +
- '}');
+let s = startParallelShell('t = db.jstests_removec;' +
+ 'Random.setRandomSeed();' +
+ 'for( j = 0; j < 1000; ++j ) {' +
+ ' o = t.findOne( { a:Random.randInt( 1100 ) } );' +
+ ' t.remove( { _id:o._id } );' +
+ ' t.insert( o );' +
+ '}');
// Find operations are error free. Note that the cursor throws if it detects the $err
// field in the returned document.
-for (i = 0; i < 200; ++i) {
+for (let i = 0; i < 200; ++i) {
t.find({a: {$gte: 0}}).hint({a: 1}).itcount();
}
diff --git a/jstests/core/write/find_and_modify/find_and_modify.js b/jstests/core/write/find_and_modify/find_and_modify.js
index 56eae29456802..47885cf9c921c 100644
--- a/jstests/core/write/find_and_modify/find_and_modify.js
+++ b/jstests/core/write/find_and_modify/find_and_modify.js
@@ -6,7 +6,7 @@
// requires_fastcount,
// ]
-t = db.find_and_modify;
+let t = db.find_and_modify;
t.drop();
// fill db
@@ -15,7 +15,8 @@ for (var i = 1; i <= 10; i++) {
}
// returns old
-out = t.findAndModify({sort: {priority: 1}, update: {$set: {inprogress: true}, $inc: {value: 1}}});
+let out =
+ t.findAndModify({sort: {priority: 1}, update: {$set: {inprogress: true}, $inc: {value: 1}}});
assert.eq(out.value, 0);
assert.eq(out.inprogress, false);
t.update({_id: out._id}, {$set: {inprogress: false}});
@@ -133,7 +134,7 @@ runFindAndModify(true /* shouldMatch */, false /* upsert */, false /* new */);
//
t.drop();
-cmdRes = db.runCommand(
+let cmdRes = db.runCommand(
{findAndModify: t.getName(), query: {_id: "miss"}, update: {$inc: {y: 1}}, upsert: true});
assert.commandWorked(cmdRes);
assert("value" in cmdRes);
diff --git a/jstests/core/write/find_and_modify/find_and_modify3.js b/jstests/core/write/find_and_modify/find_and_modify3.js
index a319aef7a2d9a..8aee710a0cf8e 100644
--- a/jstests/core/write/find_and_modify/find_and_modify3.js
+++ b/jstests/core/write/find_and_modify/find_and_modify3.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify3;
+let t = db.find_and_modify3;
t.drop();
t.insert({_id: 0, other: 0, comments: [{i: 0, j: 0}, {i: 1, j: 1}]});
@@ -14,10 +14,10 @@ t.insert({
}); // this is the only one that gets modded
t.insert({_id: 2, other: 2, comments: [{i: 0, j: 0}, {i: 1, j: 1}]});
-orig0 = t.findOne({_id: 0});
-orig2 = t.findOne({_id: 2});
+let orig0 = t.findOne({_id: 0});
+let orig2 = t.findOne({_id: 2});
-out = t.findAndModify({
+let out = t.findAndModify({
query: {_id: 1, 'comments.i': 0},
update: {$set: {'comments.$.j': 2}},
'new': true,
diff --git a/jstests/core/write/find_and_modify/find_and_modify4.js b/jstests/core/write/find_and_modify/find_and_modify4.js
index d5b3ae23cb21f..8d23edb62a94c 100644
--- a/jstests/core/write/find_and_modify/find_and_modify4.js
+++ b/jstests/core/write/find_and_modify/find_and_modify4.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_fastcount]
-t = db.find_and_modify4;
+let t = db.find_and_modify4;
t.drop();
// this is the best way to build auto-increment
diff --git a/jstests/core/write/find_and_modify/find_and_modify_hint.js b/jstests/core/write/find_and_modify/find_and_modify_hint.js
index 9298e92dd7495..0350000f533fe 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_hint.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_hint.js
@@ -9,10 +9,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
function assertCommandUsesIndex(command, expectedHintKeyPattern) {
const out = assert.commandWorked(coll.runCommand({explain: command}));
@@ -151,5 +148,4 @@ const coll = db.jstests_find_and_modify_hint;
hint: {badHint: 1}
};
assert.commandFailedWithCode(coll.runCommand(famUpdateCmd), ErrorCodes.BadValue);
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/write/find_and_modify/find_and_modify_metrics.js b/jstests/core/write/find_and_modify/find_and_modify_metrics.js
index 37ba521ef7350..5a251525c08a0 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_metrics.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_metrics.js
@@ -10,6 +10,9 @@
* # This test contains assertions on the number of executed operations, and tenant migrations
* # passthrough suites automatically retry operations on TenantMigrationAborted errors.
* tenant_migration_incompatible,
+ * # The config fuzzer may run logical session cache refreshes in the background, which modifies
+ * # some serverStatus metrics read in this test.
+ * does_not_support_config_fuzzer,
* ]
*/
(function() {
diff --git a/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js b/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js
index 0340d29bd669a..38246e386fc7c 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js
@@ -2,11 +2,8 @@
* Tests the pipeline-style update is accepted by the findAndModify command.
* @tags: [requires_non_retryable_writes]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fixture_helpers.js"); // For isMongos.
-load("jstests/libs/analyze_plan.js"); // For planHasStage().
+import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js";
const coll = db.find_and_modify_pipeline_update;
coll.drop();
@@ -93,5 +90,4 @@ if (!FixtureHelpers.isMongos(db)) {
let err =
assert.throws(() => coll.findAndModify(
{query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]}));
-assert.eq(err.code, ErrorCodes.FailedToParse);
-}());
+assert.eq(err.code, ErrorCodes.FailedToParse);
\ No newline at end of file
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6254.js b/jstests/core/write/find_and_modify/find_and_modify_server6254.js
index 5a0dae9db22c4..2cb527446ba2e 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6254.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6254.js
@@ -3,11 +3,11 @@
// key.
// @tags: [assumes_unsharded_collection, requires_fastcount]
-t = db.find_and_modify_server6254;
+let t = db.find_and_modify_server6254;
t.drop();
t.insert({x: 1});
-ret = t.findAndModify({query: {x: 1}, update: {$set: {x: 2}}, new: true});
+let ret = t.findAndModify({query: {x: 1}, update: {$set: {x: 2}}, new: true});
assert.eq(2, ret.x, tojson(ret));
assert.eq(1, t.count());
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6582.js b/jstests/core/write/find_and_modify/find_and_modify_server6582.js
index 7ad8aebee760d..2e5b38af2a23c 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6582.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6582.js
@@ -3,11 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify_server6582;
+let t = db.find_and_modify_server6582;
t.drop();
-x = t.runCommand("findAndModify", {query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
-le = x.lastErrorObject;
+let x =
+ t.runCommand("findAndModify", {query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
+let le = x.lastErrorObject;
assert.eq(le.updatedExisting, false);
assert.eq(le.n, 1);
assert.eq(le.upserted, x.value._id);
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6588.js b/jstests/core/write/find_and_modify/find_and_modify_server6588.js
index 197c892ca67c5..344bb74a2416d 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6588.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6588.js
@@ -3,25 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify_sever6588;
+let t = db.find_and_modify_sever6588;
-initial = {
- _id: 1,
- a: [{b: 1}],
- z: 1
-};
-up = {
- "$set": {"a.$.b": 2}
-};
-q = {
- _id: 1,
- "a.b": 1
-};
-correct = {
- _id: 1,
- a: [{b: 2}],
- z: 1
-};
+let initial = {_id: 1, a: [{b: 1}], z: 1};
+let up = {"$set": {"a.$.b": 2}};
+let q = {_id: 1, "a.b": 1};
+let correct = {_id: 1, a: [{b: 2}], z: 1};
t.drop();
t.insert(initial);
@@ -30,7 +17,7 @@ assert.eq(correct, t.findOne());
t.drop();
t.insert(initial);
-x = t.findAndModify({query: q, update: up});
+let x = t.findAndModify({query: q, update: up});
assert.eq(correct, t.findOne());
t.drop();
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6659.js b/jstests/core/write/find_and_modify/find_and_modify_server6659.js
index 029a32d6a4988..091c9e93ebcab 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6659.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6659.js
@@ -3,9 +3,9 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify_server6659;
+let t = db.find_and_modify_server6659;
t.drop();
-x = t.findAndModify({query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
+let x = t.findAndModify({query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
assert.eq(2, x.f);
assert.eq(2, t.findOne().f);
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6909.js b/jstests/core/write/find_and_modify/find_and_modify_server6909.js
index c74e342822bab..24a62a9ba2b4f 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6909.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6909.js
@@ -3,12 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection]
-c = db.find_and_modify_server6906;
+let c = db.find_and_modify_server6906;
c.drop();
c.insert({_id: 5, a: {b: 1}});
-ret = c.findAndModify({
+let ret = c.findAndModify({
query: {'a.b': 1},
update: {$set: {'a.b': 2}}, // Ensure the query on 'a.b' no longer matches.
new: true
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6993.js b/jstests/core/write/find_and_modify/find_and_modify_server6993.js
index 4d9b169700d48..eec75a9679b73 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server6993.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server6993.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-c = db.find_and_modify_server6993;
+let c = db.find_and_modify_server6993;
c.drop();
c.insert({a: [1, 2]});
diff --git a/jstests/core/write/find_and_modify/find_and_modify_server7660.js b/jstests/core/write/find_and_modify/find_and_modify_server7660.js
index 7973279ddcf1b..3ec3d715405e0 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_server7660.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_server7660.js
@@ -3,13 +3,13 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.find_and_modify_server7660;
+let t = db.find_and_modify_server7660;
t.drop();
-a = t.findAndModify(
+let a = t.findAndModify(
{query: {foo: 'bar'}, update: {$set: {bob: 'john'}}, sort: {foo: 1}, upsert: true, new: true});
-b = t.findOne();
+let b = t.findOne();
assert.eq(a, b);
assert.eq("bar", a.foo);
assert.eq("john", a.bob);
diff --git a/jstests/core/write/find_and_modify/find_and_modify_where.js b/jstests/core/write/find_and_modify/find_and_modify_where.js
index 62375da7aabe2..6d605f9d23afa 100644
--- a/jstests/core/write/find_and_modify/find_and_modify_where.js
+++ b/jstests/core/write/find_and_modify/find_and_modify_where.js
@@ -8,11 +8,11 @@
// requires_scripting,
// ]
-t = db.find_and_modify_where;
+let t = db.find_and_modify_where;
t.drop();
t.insert({_id: 1, x: 1});
-res = t.findAndModify({query: {$where: "return this.x == 1"}, update: {$set: {y: 1}}});
+let res = t.findAndModify({query: {$where: "return this.x == 1"}, update: {$set: {y: 1}}});
assert.eq(1, t.findOne().y);
diff --git a/jstests/core/write/insert/insert_id_undefined.js b/jstests/core/write/insert/insert_id_undefined.js
index 6d0bc38f8fb45..80873def9c658 100644
--- a/jstests/core/write/insert/insert_id_undefined.js
+++ b/jstests/core/write/insert/insert_id_undefined.js
@@ -3,7 +3,7 @@
// @tags: [assumes_no_implicit_collection_creation_after_drop, requires_fastcount]
// ensure a document with _id undefined cannot be saved
-t = db.insert_id_undefined;
+let t = db.insert_id_undefined;
t.drop();
t.insert({_id: undefined});
assert.eq(t.count(), 0);
diff --git a/jstests/core/write/update/update2.js b/jstests/core/write/update/update2.js
index 080875b50bd23..a026346e35574 100644
--- a/jstests/core/write/update/update2.js
+++ b/jstests/core/write/update/update2.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection]
-f = db.ed_db_update2;
+let f = db.ed_db_update2;
f.drop();
f.save({a: 4});
diff --git a/jstests/core/write/update/update3.js b/jstests/core/write/update/update3.js
index 5a61b8bcfc956..884a65e9e0eef 100644
--- a/jstests/core/write/update/update3.js
+++ b/jstests/core/write/update/update3.js
@@ -5,7 +5,7 @@
// Update with mods corner cases.
-f = db.jstests_update3;
+let f = db.jstests_update3;
f.drop();
f.save({a: 1});
@@ -30,4 +30,4 @@ assert.eq(0, f.findOne()._id, "D");
f.drop();
f.save({_id: 1, a: 1});
f.update({}, {$unset: {"a": 1, "b.c": 1}});
-assert.docEq({_id: 1}, f.findOne(), "E");
\ No newline at end of file
+assert.docEq({_id: 1}, f.findOne(), "E");
diff --git a/jstests/core/write/update/update5.js b/jstests/core/write/update/update5.js
index fafc0d72ce08f..2bdcaea1ef2bc 100644
--- a/jstests/core/write/update/update5.js
+++ b/jstests/core/write/update/update5.js
@@ -4,7 +4,7 @@
//
// @tags: [assumes_unsharded_collection, requires_fastcount]
-t = db.update5;
+let t = db.update5;
function go(key) {
t.drop();
@@ -24,7 +24,7 @@ function go(key) {
check(3, "C");
var ik = {};
- for (k in key)
+ for (let k in key)
ik[k] = 1;
t.createIndex(ik);
diff --git a/jstests/core/write/update/update6.js b/jstests/core/write/update/update6.js
index 8a1950b8d02ec..7704e921151bb 100644
--- a/jstests/core/write/update/update6.js
+++ b/jstests/core/write/update/update6.js
@@ -5,7 +5,7 @@
// assumes_unsharded_collection,
// ]
-t = db.update6;
+let t = db.update6;
t.drop();
t.save({a: 1, b: {c: 1, d: 1}});
diff --git a/jstests/core/write/update/update7.js b/jstests/core/write/update/update7.js
index d3a7a5d1debeb..a7fcbbfddf05c 100644
--- a/jstests/core/write/update/update7.js
+++ b/jstests/core/write/update/update7.js
@@ -3,7 +3,7 @@
// key.
// @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes]
-t = db.update7;
+let t = db.update7;
t.drop();
function s() {
@@ -119,6 +119,7 @@ assert.eq("4,7,", s(), "E1");
t.update({}, {$inc: {x: 1}}, false, true);
assert.eq("5,8,1", s(), "E2");
+let i;
for (i = 4; i < 8; i++)
t.save({_id: i});
t.save({_id: i, x: 1});
diff --git a/jstests/core/write/update/update8.js b/jstests/core/write/update/update8.js
index 596bc8695ddc7..7db54242cf399 100644
--- a/jstests/core/write/update/update8.js
+++ b/jstests/core/write/update/update8.js
@@ -1,5 +1,5 @@
-t = db.update8;
+let t = db.update8;
t.drop();
t.update({_id: 1, tags: {"$ne": "a"}}, {"$push": {tags: "a"}}, true);
diff --git a/jstests/core/write/update/update9.js b/jstests/core/write/update/update9.js
index d119681a09e45..e7f9da8ddb601 100644
--- a/jstests/core/write/update/update9.js
+++ b/jstests/core/write/update/update9.js
@@ -1,8 +1,8 @@
-t = db.update9;
+let t = db.update9;
t.drop();
-orig = {
+let orig = {
"_id": 1,
"question": "a",
"choices": {"1": {"choice": "b"}, "0": {"choice": "c"}},
diff --git a/jstests/core/write/update/update_addToSet2.js b/jstests/core/write/update/update_addToSet2.js
index 44ba8bce671e8..2aabdb3078c95 100644
--- a/jstests/core/write/update/update_addToSet2.js
+++ b/jstests/core/write/update/update_addToSet2.js
@@ -1,10 +1,8 @@
-t = db.update_addToSet2;
+let t = db.update_addToSet2;
t.drop();
-o = {
- _id: 1
-};
+let o = {_id: 1};
t.insert({_id: 1});
t.update({}, {$addToSet: {'kids': {'name': 'Bob', 'age': '4'}}});
diff --git a/jstests/core/write/update/update_addToSet3.js b/jstests/core/write/update/update_addToSet3.js
index efd682cef4cf6..ee13b651233b5 100644
--- a/jstests/core/write/update/update_addToSet3.js
+++ b/jstests/core/write/update/update_addToSet3.js
@@ -1,6 +1,6 @@
// Test the use of $each in $addToSet
-t = db.update_addToSet3;
+let t = db.update_addToSet3;
t.drop();
t.insert({_id: 1});
diff --git a/jstests/core/write/update/update_arraymatch1.js b/jstests/core/write/update/update_arraymatch1.js
index 10b7e37e45127..dd88af731aa6b 100644
--- a/jstests/core/write/update/update_arraymatch1.js
+++ b/jstests/core/write/update/update_arraymatch1.js
@@ -3,19 +3,14 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.update_arraymatch1;
+let t = db.update_arraymatch1;
t.drop();
-o = {
- _id: 1,
- a: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}]
-};
+let o = {_id: 1, a: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}]};
t.insert(o);
assert.eq(o, t.findOne(), "A1");
-q = {
- "a.x": 2
-};
+let q = {"a.x": 2};
t.update(q, {$set: {b: 5}});
o.b = 5;
assert.eq(o, t.findOne(), "A2");
diff --git a/jstests/core/write/update/update_arraymatch2.js b/jstests/core/write/update/update_arraymatch2.js
index 7610de7c96202..5ec6be9e3cf03 100644
--- a/jstests/core/write/update/update_arraymatch2.js
+++ b/jstests/core/write/update/update_arraymatch2.js
@@ -1,6 +1,6 @@
// @tags: [requires_multi_updates, requires_non_retryable_writes]
-t = db.update_arraymatch2;
+let t = db.update_arraymatch2;
t.drop();
t.insert({});
diff --git a/jstests/core/write/update/update_arraymatch3.js b/jstests/core/write/update/update_arraymatch3.js
index 36f7ab22430eb..e16a518fd80a8 100644
--- a/jstests/core/write/update/update_arraymatch3.js
+++ b/jstests/core/write/update/update_arraymatch3.js
@@ -1,13 +1,9 @@
// @tags: [requires_multi_updates, requires_non_retryable_writes]
-t = db.update_arraymatch3;
+let t = db.update_arraymatch3;
t.drop();
-o = {
- _id: 1,
- title: "ABC",
- comments: [{"by": "joe", "votes": 3}, {"by": "jane", "votes": 7}]
-};
+let o = {_id: 1, title: "ABC", comments: [{"by": "joe", "votes": 3}, {"by": "jane", "votes": 7}]};
t.save(o);
assert.eq(o, t.findOne(), "A1");
diff --git a/jstests/core/write/update/update_arraymatch4.js b/jstests/core/write/update/update_arraymatch4.js
index 3c087e53ca5ad..6c8f378fad196 100644
--- a/jstests/core/write/update/update_arraymatch4.js
+++ b/jstests/core/write/update/update_arraymatch4.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.update_arraymatch4;
+let t = db.update_arraymatch4;
t.drop();
-x = {
- _id: 1,
- arr: ["A1", "B1", "C1"]
-};
+let x = {_id: 1, arr: ["A1", "B1", "C1"]};
t.insert(x);
assert.eq(x, t.findOne(), "A1");
diff --git a/jstests/core/write/update/update_arraymatch5.js b/jstests/core/write/update/update_arraymatch5.js
index 1b4c967b38b7d..0d676f3e4e51e 100644
--- a/jstests/core/write/update/update_arraymatch5.js
+++ b/jstests/core/write/update/update_arraymatch5.js
@@ -4,7 +4,7 @@
// requires_non_retryable_writes,
// ]
-t = db.update_arraymatch5;
+let t = db.update_arraymatch5;
t.drop();
t.insert({abc: {visible: true}, testarray: [{foobar_id: 316, visible: true, xxx: 1}]});
diff --git a/jstests/core/write/update/update_arraymatch6.js b/jstests/core/write/update/update_arraymatch6.js
index 1241753b86694..16563e8d3e5f4 100644
--- a/jstests/core/write/update/update_arraymatch6.js
+++ b/jstests/core/write/update/update_arraymatch6.js
@@ -4,7 +4,7 @@
// @tags: [assumes_unsharded_collection]
var res;
-t = db.jstests_update_arraymatch6;
+let t = db.jstests_update_arraymatch6;
t.drop();
function doTest() {
@@ -17,4 +17,4 @@ function doTest() {
doTest();
t.drop();
t.createIndex({'a.id': 1});
-doTest();
\ No newline at end of file
+doTest();
diff --git a/jstests/core/write/update/update_arraymatch7.js b/jstests/core/write/update/update_arraymatch7.js
index cded4ba56f493..552c8469dd19e 100644
--- a/jstests/core/write/update/update_arraymatch7.js
+++ b/jstests/core/write/update/update_arraymatch7.js
@@ -6,7 +6,7 @@
// Check that the positional operator works properly when an index only match is used for the update
// query spec. SERVER-5067
-t = db.jstests_update_arraymatch7;
+let t = db.jstests_update_arraymatch7;
t.drop();
function testPositionalInc() {
diff --git a/jstests/core/write/update/update_arraymatch8.js b/jstests/core/write/update/update_arraymatch8.js
index e3aa91d642292..2f5365352e270 100644
--- a/jstests/core/write/update/update_arraymatch8.js
+++ b/jstests/core/write/update/update_arraymatch8.js
@@ -9,7 +9,7 @@
// SERVER-7511
// array.$.name
-t = db.jstests_update_arraymatch8;
+let t = db.jstests_update_arraymatch8;
t.drop();
t.createIndex({'array.name': 1});
t.insert({'array': [{'name': 'old'}]});
@@ -51,7 +51,7 @@ assert(!t.findOne({'array.name': 'old'}));
// // array.12.name
t = db.jstests_update_arraymatch8;
t.drop();
-arr = new Array();
+let arr = new Array();
for (var i = 0; i < 20; i++) {
arr.push({'name': 'old'});
}
diff --git a/jstests/core/write/update/update_blank1.js b/jstests/core/write/update/update_blank1.js
index cd8f7433ebeb5..0e643f890b0e9 100644
--- a/jstests/core/write/update/update_blank1.js
+++ b/jstests/core/write/update/update_blank1.js
@@ -3,15 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.update_blank1;
+let t = db.update_blank1;
t.drop();
-orig = {
- "": 1,
- _id: 2,
- "a": 3,
- "b": 4
-};
+let orig = {"": 1, _id: 2, "a": 3, "b": 4};
t.insert(orig);
var res = t.update({}, {$set: {"c": 5}});
print(res);
diff --git a/jstests/core/write/update/update_dbref.js b/jstests/core/write/update/update_dbref.js
index f3e461c737927..caaff55cc9174 100644
--- a/jstests/core/write/update/update_dbref.js
+++ b/jstests/core/write/update/update_dbref.js
@@ -8,7 +8,7 @@
// Test that we can update DBRefs, but not dbref fields outside a DBRef
var res;
-t = db.jstests_update_dbref;
+let t = db.jstests_update_dbref;
t.drop();
res = t.save({_id: 1, a: new DBRef("a", "b")});
diff --git a/jstests/core/write/update/update_hint.js b/jstests/core/write/update/update_hint.js
index f0869073de0da..a834fd0bb469a 100644
--- a/jstests/core/write/update/update_hint.js
+++ b/jstests/core/write/update/update_hint.js
@@ -7,10 +7,7 @@
* @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
function assertCommandUsesIndex(command, expectedHintKeyPattern) {
const out = assert.commandWorked(coll.runCommand({explain: command}));
@@ -162,5 +159,4 @@ function failedHintTest() {
normalIndexTest();
sparseIndexTest();
shellHelpersTest();
-failedHintTest();
-})();
+failedHintTest();
\ No newline at end of file
diff --git a/jstests/core/write/update/update_invalid1.js b/jstests/core/write/update/update_invalid1.js
index bbda4cee53e94..7276d33e5ef58 100644
--- a/jstests/core/write/update/update_invalid1.js
+++ b/jstests/core/write/update/update_invalid1.js
@@ -2,7 +2,7 @@
// requires_fastcount,
// ]
-t = db.update_invalid1;
+let t = db.update_invalid1;
t.drop();
t.update({_id: 5}, {$set: {$inc: {x: 5}}}, true);
diff --git a/jstests/core/write/update/update_metrics.js b/jstests/core/write/update/update_metrics.js
index 8bf93e5009d95..a5e3938d47b2c 100644
--- a/jstests/core/write/update/update_metrics.js
+++ b/jstests/core/write/update/update_metrics.js
@@ -5,6 +5,9 @@
* @tags: [
* # The test is designed to work with an unsharded collection.
* assumes_unsharded_collection,
+ * # The config fuzzer may run logical session cache refreshes in the background, which modifies
+ * # some serverStatus metrics read in this test.
+ * does_not_support_config_fuzzer,
* # The test relies on the precise number of executions of commands.
* requires_non_retryable_writes,
* # This test contains assertions on the number of executed operations, and tenant migrations
diff --git a/jstests/core/write/update/update_multi3.js b/jstests/core/write/update/update_multi3.js
index 4c6769bc65a9e..463d4192ad97d 100644
--- a/jstests/core/write/update/update_multi3.js
+++ b/jstests/core/write/update/update_multi3.js
@@ -1,6 +1,6 @@
// @tags: [requires_multi_updates, requires_non_retryable_writes]
-t = db.update_multi3;
+let t = db.update_multi3;
function test(useIndex) {
t.drop();
@@ -8,7 +8,7 @@ function test(useIndex) {
if (useIndex)
t.createIndex({k: 1});
- for (i = 0; i < 10; i++) {
+ for (let i = 0; i < 10; i++) {
t.save({_id: i, k: 'x', a: []});
}
diff --git a/jstests/core/write/update/update_multi4.js b/jstests/core/write/update/update_multi4.js
index cfe11616efc46..d0e868ee5ddd8 100644
--- a/jstests/core/write/update/update_multi4.js
+++ b/jstests/core/write/update/update_multi4.js
@@ -1,9 +1,9 @@
// @tags: [requires_multi_updates, requires_non_retryable_writes]
-t = db.update_mulit4;
+let t = db.update_mulit4;
t.drop();
-for (i = 0; i < 1000; i++) {
+for (let i = 0; i < 1000; i++) {
t.insert({_id: i, k: i % 12, v: "v" + i % 12});
}
diff --git a/jstests/core/write/update/update_multi6.js b/jstests/core/write/update/update_multi6.js
index 023398534cdc0..217da4c002d34 100644
--- a/jstests/core/write/update/update_multi6.js
+++ b/jstests/core/write/update/update_multi6.js
@@ -2,7 +2,7 @@
var res;
-t = db.update_multi6;
+let t = db.update_multi6;
t.drop();
t.update({_id: 1}, {_id: 1, x: 1, y: 2}, true, false);
diff --git a/jstests/core/write/update/update_pipeline_shell_helpers.js b/jstests/core/write/update/update_pipeline_shell_helpers.js
index 0ac1b35f4ce6e..d22abfdc0aa60 100644
--- a/jstests/core/write/update/update_pipeline_shell_helpers.js
+++ b/jstests/core/write/update/update_pipeline_shell_helpers.js
@@ -7,12 +7,9 @@
* requires_non_retryable_writes,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'.
-load("jstests/libs/analyze_plan.js"); // For planHasStage().
-load("jstests/libs/fixture_helpers.js"); // For isMongos().
+import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js"); // For isMongos().
// Make sure that the test collection is empty before starting the test.
const testColl = db.update_pipeline_shell_helpers_test;
@@ -129,5 +126,4 @@ if (!FixtureHelpers.isMongos(db)) {
assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}]));
assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}]));
assert.throws(() => testColl.bulkWrite(
- [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}]));
-})();
+ [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}]));
diff --git a/jstests/core/write/update/update_setOnInsert.js b/jstests/core/write/update/update_setOnInsert.js
index 9457c69f325d7..6e3f757660fa3 100644
--- a/jstests/core/write/update/update_setOnInsert.js
+++ b/jstests/core/write/update/update_setOnInsert.js
@@ -1,5 +1,5 @@
// This tests that $setOnInsert works and allow setting the _id
-t = db.update_setOnInsert;
+let t = db.update_setOnInsert;
var res;
function dotest(useIndex) {
diff --git a/jstests/core/write/update/update_with_large_hint.js b/jstests/core/write/update/update_with_large_hint.js
index 0b2521337cecf..d812954130d2b 100644
--- a/jstests/core/write/update/update_with_large_hint.js
+++ b/jstests/core/write/update/update_with_large_hint.js
@@ -33,4 +33,4 @@ assert.commandFailedWithCode(coll.runCommand("delete", {
deletes: [{q: {_id: 0}, limit: 1, hint: {[longHint]: 1}}],
}),
ErrorCodes.BadValue);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/core/write/update/updatea.js b/jstests/core/write/update/updatea.js
index 99938c433fa15..23dda37054b81 100644
--- a/jstests/core/write/update/updatea.js
+++ b/jstests/core/write/update/updatea.js
@@ -4,13 +4,10 @@
// @tags: [assumes_unsharded_collection]
var res;
-t = db.updatea;
+let t = db.updatea;
t.drop();
-orig = {
- _id: 1,
- a: [{x: 1, y: 2}, {x: 10, y: 11}]
-};
+let orig = {_id: 1, a: [{x: 1, y: 2}, {x: 10, y: 11}]};
res = t.save(orig);
assert.commandWorked(res);
@@ -52,7 +49,7 @@ orig = {
_id: 1,
a: []
};
-for (i = 0; i < 12; i++)
+for (let i = 0; i < 12; i++)
orig.a.push(i);
res = t.save(orig);
diff --git a/jstests/core/write/update/updateb.js b/jstests/core/write/update/updateb.js
index 1518e7f354637..cea484a27329d 100644
--- a/jstests/core/write/update/updateb.js
+++ b/jstests/core/write/update/updateb.js
@@ -3,15 +3,12 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.updateb;
+let t = db.updateb;
t.drop();
t.update({"x.y": 2}, {$inc: {a: 7}}, true);
-correct = {
- a: 7,
- x: {y: 2}
-};
-got = t.findOne();
+let correct = {a: 7, x: {y: 2}};
+let got = t.findOne();
delete got._id;
assert.docEq(correct, got, "A");
diff --git a/jstests/core/write/update/updatec.js b/jstests/core/write/update/updatec.js
index 8ce8cf4ecdd69..e47ed3ff797c8 100644
--- a/jstests/core/write/update/updatec.js
+++ b/jstests/core/write/update/updatec.js
@@ -1,5 +1,5 @@
-t = db.updatec;
+let t = db.updatec;
t.drop();
t.update({"_id": 123}, {$set: {"v": {"i": 123, "a": 456}}, $push: {"f": 234}}, 1, 0);
diff --git a/jstests/core/write/update/updated.js b/jstests/core/write/update/updated.js
index 919d02610c7e6..d850181afb5a0 100644
--- a/jstests/core/write/update/updated.js
+++ b/jstests/core/write/update/updated.js
@@ -3,13 +3,10 @@
// key.
// @tags: [assumes_unsharded_collection]
-t = db.updated;
+let t = db.updated;
t.drop();
-o = {
- _id: Math.random(),
- items: [null, null, null, null]
-};
+let o = {_id: Math.random(), items: [null, null, null, null]};
t.insert(o);
assert.docEq(o, t.findOne(), "A1");
diff --git a/jstests/core/write/update/updatee.js b/jstests/core/write/update/updatee.js
index fbbcac01c9a96..ed2c3a0d81c55 100644
--- a/jstests/core/write/update/updatee.js
+++ b/jstests/core/write/update/updatee.js
@@ -5,7 +5,7 @@
// big numeric updates (used to overflow)
-t = db.updatee;
+let t = db.updatee;
t.drop();
var o = {
diff --git a/jstests/core/write/update/updatef.js b/jstests/core/write/update/updatef.js
index 6597484f78aa8..3a55a38d95495 100644
--- a/jstests/core/write/update/updatef.js
+++ b/jstests/core/write/update/updatef.js
@@ -7,24 +7,24 @@
// Test unsafe management of nsdt on update command yield SERVER-3208
-prefixNS = db.jstests_updatef;
+let prefixNS = db.jstests_updatef;
prefixNS.save({});
-t = db.jstests_updatef_actual;
+let t = db.jstests_updatef_actual;
t.drop();
t.save({a: 0, b: []});
-for (i = 0; i < 1000; ++i) {
+for (let i = 0; i < 1000; ++i) {
t.save({a: 100});
}
t.save({a: 0, b: []});
// Repeatedly rename jstests_updatef to jstests_updatef_ and back. This will
// invalidate the jstests_updatef_actual NamespaceDetailsTransient object.
-s = startParallelShell(
+let s = startParallelShell(
"for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }");
-for (i = 0; i < 20; ++i) {
+for (let i = 0; i < 20; ++i) {
t.update({a: 0}, {$push: {b: i}}, false, true);
}
diff --git a/jstests/core/write/update/updateg.js b/jstests/core/write/update/updateg.js
index 8a849a0ce5939..b014547b3e2a0 100644
--- a/jstests/core/write/update/updateg.js
+++ b/jstests/core/write/update/updateg.js
@@ -5,7 +5,7 @@
// SERVER-3370 check modifiers with field name characters comparing less than '.' character.
-t = db.jstests_updateg;
+let t = db.jstests_updateg;
t.drop();
t.update({}, {'$inc': {'all.t': 1, 'all-copy.t': 1}}, true);
diff --git a/jstests/core/write/update/updateh.js b/jstests/core/write/update/updateh.js
index 72d0d22c616e6..bafebc08ed77e 100644
--- a/jstests/core/write/update/updateh.js
+++ b/jstests/core/write/update/updateh.js
@@ -7,7 +7,7 @@
var res;
-t = db.jstest_updateh;
+let t = db.jstest_updateh;
t.drop();
t.insert({x: 1});
diff --git a/jstests/core/write/update/updatei.js b/jstests/core/write/update/updatei.js
index 599c9538a80f7..c5b7875df51bf 100644
--- a/jstests/core/write/update/updatei.js
+++ b/jstests/core/write/update/updatei.js
@@ -5,13 +5,13 @@
// Test new (optional) update syntax
// SERVER-4176
-t = db.updatei;
+let t = db.updatei;
// Using a multi update
t.drop();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -24,7 +24,7 @@ t.drop();
// Using a single update
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -35,7 +35,7 @@ t.drop();
// Using upsert, found
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -46,7 +46,7 @@ t.drop();
// Using upsert + multi, found
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -59,7 +59,7 @@ t.drop();
// Using upsert, not found
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -70,7 +70,7 @@ t.drop();
// Without upsert, found
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
@@ -81,7 +81,7 @@ t.drop();
// Without upsert, not found
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
t.save({_id: i, k: "x", a: []});
}
diff --git a/jstests/core/write/update/updatej.js b/jstests/core/write/update/updatej.js
index bab2a32f45fc2..77c5f52cbcf3d 100644
--- a/jstests/core/write/update/updatej.js
+++ b/jstests/core/write/update/updatej.js
@@ -5,7 +5,7 @@
// encounters an error.
// @tags: [requires_multi_updates, requires_non_retryable_writes, assumes_unsharded_collection]
-t = db.jstests_updatej;
+let t = db.jstests_updatej;
t.drop();
t.save({a: []});
diff --git a/jstests/core/write/update/updatek.js b/jstests/core/write/update/updatek.js
index 923b4145d1d7d..4ef4fe8ddb982 100644
--- a/jstests/core/write/update/updatek.js
+++ b/jstests/core/write/update/updatek.js
@@ -5,7 +5,7 @@
// Test modifier operations on numerically equivalent string field names. SERVER-4776
-t = db.jstests_updatek;
+let t = db.jstests_updatek;
t.drop();
t.save({_id: 0, '1': {}, '01': {}});
diff --git a/jstests/core/write/update/updatel.js b/jstests/core/write/update/updatel.js
index a663f30672214..332fad0c6ece8 100644
--- a/jstests/core/write/update/updatel.js
+++ b/jstests/core/write/update/updatel.js
@@ -8,7 +8,7 @@
// setinel ('$'), the update fails with an error. SERVER-6669 SERVER-4713
var res;
-t = db.jstests_updatel;
+let t = db.jstests_updatel;
t.drop();
// The collection is empty, forcing an upsert. In this case the query has no array position match
diff --git a/jstests/core/write/update/updatem.js b/jstests/core/write/update/updatem.js
index 8e4af7e56c741..0dd0fcba0140e 100644
--- a/jstests/core/write/update/updatem.js
+++ b/jstests/core/write/update/updatem.js
@@ -7,7 +7,7 @@
// Tests that _id will exist in all updated docs.
-t = db.jstests_updatem;
+let t = db.jstests_updatem;
t.drop();
// new _id from insert (upsert:true)
diff --git a/jstests/core/write/update/upsert_and.js b/jstests/core/write/update/upsert_and.js
index 1e45cbe8dc20b..c2bab3ab25cf3 100644
--- a/jstests/core/write/update/upsert_and.js
+++ b/jstests/core/write/update/upsert_and.js
@@ -5,7 +5,7 @@
// tests to ensure fields in $and conditions are created when using the query to do upsert
var res;
-coll = db.upsert4;
+let coll = db.upsert4;
coll.drop();
res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12}, {$inc: {y: 1}}, true);
diff --git a/jstests/core/write/update/upsert_fields.js b/jstests/core/write/update/upsert_fields.js
index 310bace490744..0433b15048bd2 100644
--- a/jstests/core/write/update/upsert_fields.js
+++ b/jstests/core/write/update/upsert_fields.js
@@ -12,7 +12,7 @@ coll.drop();
var upsertedResult = function(query, expr) {
coll.drop();
- result = coll.update(query, expr, {upsert: true});
+ let result = coll.update(query, expr, {upsert: true});
return result;
};
diff --git a/jstests/core/write/update/upsert_shell.js b/jstests/core/write/update/upsert_shell.js
index 3ab07b50c21b3..252a0f4139a55 100644
--- a/jstests/core/write/update/upsert_shell.js
+++ b/jstests/core/write/update/upsert_shell.js
@@ -5,7 +5,7 @@
// tests to make sure that the new _id is returned after the insert in the shell
var l;
-t = db.upsert1;
+let t = db.upsert1;
t.drop();
// make sure the new _id is returned when $mods are used
diff --git a/jstests/core/write/validate_user_documents.js b/jstests/core/write/validate_user_documents.js
index 2a30ed0fea8c9..65aa7cde07ea7 100644
--- a/jstests/core/write/validate_user_documents.js
+++ b/jstests/core/write/validate_user_documents.js
@@ -10,7 +10,7 @@
// Ensure that inserts and updates of the system.users collection validate the schema of inserted
// documents.
-mydb = db.getSiblingDB("validate_user_documents");
+let mydb = db.getSiblingDB("validate_user_documents");
function assertGLEOK(status) {
assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
diff --git a/jstests/cqf/analyze/array_histogram.js b/jstests/cqf/analyze/array_histogram.js
index e242277c5005b..4e854983cd759 100644
--- a/jstests/cqf/analyze/array_histogram.js
+++ b/jstests/cqf/analyze/array_histogram.js
@@ -1,10 +1,12 @@
/**
* This test verifies array histograms are both generated and estimated correctly.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/ce_stats_utils.js');
+import {
+ createAndValidateHistogram,
+ runHistogramsTest,
+ verifyCEForMatch
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE} from "jstests/libs/optimizer_utils.js";
runHistogramsTest(function verifyArrayHistograms() {
const coll = db.array_histogram;
@@ -283,4 +285,3 @@ runHistogramsTest(function verifyArrayHistograms() {
hint: idx
});
});
-}());
diff --git a/jstests/cqf/analyze/ce_histogram.js b/jstests/cqf/analyze/ce_histogram.js
index 47e86d7b1d010..2f45ecb5743c2 100644
--- a/jstests/cqf/analyze/ce_histogram.js
+++ b/jstests/cqf/analyze/ce_histogram.js
@@ -10,10 +10,12 @@
* change as a result of updates to estimation, since estimates for bucket boundaries should always
* be accurate.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/ce_stats_utils.js');
+import {
+ createAndValidateHistogram,
+ runHistogramsTest,
+ verifyCEForMatch
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE} from "jstests/libs/optimizer_utils.js";
const charCodeA = 65;
const collName = "ce_histogram";
@@ -253,4 +255,3 @@ runHistogramsTest(function testScalarHistograms() {
verifyCEForNDV(3);
verifyCEForNDV(4);
});
-}());
diff --git a/jstests/cqf/analyze/ce_sample_rate.js b/jstests/cqf/analyze/ce_sample_rate.js
index 4af4c6fc1c074..6180266a0e4d9 100644
--- a/jstests/cqf/analyze/ce_sample_rate.js
+++ b/jstests/cqf/analyze/ce_sample_rate.js
@@ -2,10 +2,14 @@
* This is an integration test for histogram CE & statistics to ensure that we can estimate a
* histogram appropriately for different sample sizes.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/ce_stats_utils.js');
+import {
+ assertApproxEq,
+ createAndValidateHistogram,
+ createHistogram,
+ getRootCE,
+ runHistogramsTest
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE, round2} from "jstests/libs/optimizer_utils.js";
const field = "sampled";
const numDocs = 1000;
@@ -135,4 +139,3 @@ runHistogramsTest(function testSampleRates() {
testSampleRatesForDocsWithPredicates(docs, expectedEst, expectedHistogram, sampleRates);
}
});
-})();
diff --git a/jstests/cqf/analyze/missing_histogram.js b/jstests/cqf/analyze/missing_histogram.js
index 57d16836883cd..0f17cd4e8de56 100644
--- a/jstests/cqf/analyze/missing_histogram.js
+++ b/jstests/cqf/analyze/missing_histogram.js
@@ -2,10 +2,12 @@
* This test verifies that we gracefully handle the case where we do not have statistics or a
* histogram available for a given path. It also tests empty collections are handled appropriately.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/ce_stats_utils.js');
+import {
+ createAndValidateHistogram,
+ runHistogramsTest,
+ verifyCEForMatch
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE} from "jstests/libs/optimizer_utils.js";
runHistogramsTest(function testEmptyAndMissingHistograms() {
const emptyColl = db.missing_histogram_empty;
@@ -45,4 +47,3 @@ runHistogramsTest(function testEmptyAndMissingHistograms() {
hint: {notAField: 1},
});
});
-}());
diff --git a/jstests/cqf/analyze/scalar_histograms.js b/jstests/cqf/analyze/scalar_histograms.js
index 83dfa7c0a42b3..d6b73fe7977a8 100644
--- a/jstests/cqf/analyze/scalar_histograms.js
+++ b/jstests/cqf/analyze/scalar_histograms.js
@@ -1,15 +1,12 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
const coll = db.cqf_analyze_scalar_hist;
coll.drop();
@@ -58,4 +55,3 @@ testAnalyzeStats("c", docs, 37);
assert.commandWorked(
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"}));
-}());
diff --git a/jstests/cqf/analyze/type_counts.js b/jstests/cqf/analyze/type_counts.js
index 2a20163e52d53..964c7a36a2157 100644
--- a/jstests/cqf/analyze/type_counts.js
+++ b/jstests/cqf/analyze/type_counts.js
@@ -3,10 +3,17 @@
* histogram with appropriate type counts and retrieve that histogram to estimate a simple match
* predicate. Note that this tests predicates and histograms on several types.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/ce_stats_utils.js');
+import {
+ createAndValidateHistogram,
+ runHistogramsTest,
+ verifyCEForMatch,
+ verifyCEForMatchNodes,
+} from "jstests/libs/ce_stats_utils.js";
+import {
+ extractLogicalCEFromNode,
+ forceCE,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
runHistogramsTest(function testTypeCounts() {
const coll = db.type_counts;
@@ -871,4 +878,3 @@ runHistogramsTest(function testTypeCounts() {
expected: []
});
});
-}());
diff --git a/jstests/cqf/disjunction.js b/jstests/cqf/disjunction.js
deleted file mode 100644
index fa265d8c3f8db..0000000000000
--- a/jstests/cqf/disjunction.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Test that $or is translated to a SargableNode, and executed with correct results.
- */
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
-if (!checkCascadesOptimizerEnabled(db)) {
- jsTestLog("Skipping test because the optimizer is not enabled");
- return;
-}
-
-const coll = db.cqf_disjunction;
-coll.drop();
-
-let docs = [];
-for (let i = 0; i < 10; ++i) {
- // Generate enough documents for an index to be preferable.
- for (let a = 0; a < 10; ++a) {
- for (let b = 0; b < 10; ++b) {
- docs.push({a, b});
- }
- }
-}
-assert.commandWorked(coll.insert(docs));
-
-let result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray();
-assert.eq(result.length, 190, result);
-for (const doc of result) {
- assert(doc.a === 2 || doc.b === 3, "Query returned a doc not matching the predicate: ${doc}");
-}
-
-assert.commandWorked(coll.createIndexes([
- {a: 1},
- {b: 1},
-]));
-
-result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray();
-assert.eq(result.length, 190, result);
-for (const doc of result) {
- assert(doc.a === 2 || doc.b === 3, "Query returned a doc not matching the predicate: ${doc}");
-}
-
-// At time of writing, queries that compare to literal array or MinKey/MaxKey are translated to
-// an ABT with a disjunction in it.
-result = coll.find({arr: {$eq: [2]}}).toArray();
-assert.eq(result.length, 0, result);
-
-result = coll.find({arr: {$gt: MinKey()}}).toArray();
-assert.eq(result.length, docs.length, result);
-
-// Test a nested or/and where one leaf predicate ($exists) cannot be fully satisfied with index
-// bounds.
-result = coll.find({
- $or: [
- // 'b' exists on every doc so this should never happen.
- {a: 5, b: {$exists: false}},
- // The field 'nope' doesn't exist so this also shouldn't happen.
- {nope: 'nope'},
- ]
- })
- .toArray();
-assert.eq(result.length, 0, result);
-
-// Test that adding an $or predicate doesn't inhibit the use of index scan for other predicates.
-// The $or can just be a residual predicate.
-{
- const res = runWithParams(
- [
- {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
- {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
- ],
- () => coll.explain("executionStats")
- .find({a: 2, $or: [{b: 2}, {no_such_field: 123}]})
- .finish());
- assert.eq(10, res.executionStats.nReturned);
-
- // We get an index scan on 'a' and some expression for the $or.
- const expectedStr =
- `Root [{scan_0}]
-Filter []
-| BinaryOp [Or]
-| | EvalFilter []
-| | | Variable [scan_0]
-| | PathGet [no_such_field]
-| | PathTraverse [1]
-| | PathCompare [Eq]
-| | Const [123]
-| EvalFilter []
-| | Variable [scan_0]
-| PathGet [b]
-| PathCompare [Eq]
-| Const [2]
-NestedLoopJoin [joinType: Inner, {rid_1}]
-| | Const [true]
-| LimitSkip [limit: 1, skip: 0]
-| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_]
-IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}]
-`;
- const actualStr = removeUUIDsFromExplain(db, res);
- assert.eq(expectedStr, actualStr);
-}
-}());
diff --git a/jstests/cqf/index_hints.js b/jstests/cqf/index_hints.js
deleted file mode 100644
index 6594a4be09890..0000000000000
--- a/jstests/cqf/index_hints.js
+++ /dev/null
@@ -1,87 +0,0 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
-if (!checkCascadesOptimizerEnabled(db)) {
- jsTestLog("Skipping test because the optimizer is not enabled");
- return;
-}
-
-const t = db.cqf_index_hints;
-t.drop();
-
-assert.commandWorked(t.insert({_id: 0, a: [1, 2, 3, 4]}));
-assert.commandWorked(t.insert({_id: 1, a: [2, 3, 4]}));
-assert.commandWorked(t.insert({_id: 2, a: [2]}));
-assert.commandWorked(t.insert({_id: 3, a: 2}));
-assert.commandWorked(t.insert({_id: 4, a: [1, 3]}));
-
-assert.commandWorked(t.createIndex({a: 1}));
-
-// There are too few documents, and an index is not preferable.
-{
- let res = t.explain("executionStats").find({a: 2}).finish();
- assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish();
- assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish();
- assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish();
- assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-
- res = t.find({a: 2}).hint({$natural: 1}).toArray();
- assert.eq(res[0]._id, 0, res);
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish();
- assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-
- res = t.find({a: 2}).hint({$natural: -1}).toArray();
- assert.eq(res[0]._id, 3, res);
-}
-
-// Generate enough documents for index to be preferable.
-for (let i = 0; i < 100; i++) {
- assert.commandWorked(t.insert({a: i + 10}));
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).finish();
- assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish();
- assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish();
- assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}
-{
- let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish();
- assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-
- res = t.find({a: 2}).hint({$natural: 1}).toArray();
- assert.eq(res[0]._id, 0, res);
-}
-
-{
- let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish();
- assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-
- res = t.find({a: 2}).hint({$natural: -1}).toArray();
- assert.eq(res[0]._id, 3, res);
-}
-}());
diff --git a/jstests/cqf/no_collection.js b/jstests/cqf/no_collection.js
deleted file mode 100644
index 3c7ecae4c3271..0000000000000
--- a/jstests/cqf/no_collection.js
+++ /dev/null
@@ -1,15 +0,0 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
-if (!checkCascadesOptimizerEnabled(db)) {
- jsTestLog("Skipping test because the optimizer is not enabled");
- return;
-}
-
-let t = db.cqf_no_collection;
-t.drop();
-
-const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}]);
-assert.eq(0, res.executionStats.nReturned);
-}());
\ No newline at end of file
diff --git a/jstests/cqf/array_index.js b/jstests/cqf/optimizer/array_index.js
similarity index 94%
rename from jstests/cqf/array_index.js
rename to jstests/cqf/optimizer/array_index.js
index b96fbb9413d6a..1dba4e9019881 100644
--- a/jstests/cqf/array_index.js
+++ b/jstests/cqf/optimizer/array_index.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_array_index;
@@ -61,4 +62,3 @@ assert.commandWorked(t.createIndex({a: 1}));
res = t.explain("executionStats").aggregate([{$match: {a: {$gte: MaxKey}}}]);
assert.eq(1, res.executionStats.nReturned);
}
-}());
diff --git a/jstests/cqf/array_match.js b/jstests/cqf/optimizer/array_match.js
similarity index 95%
rename from jstests/cqf/array_match.js
rename to jstests/cqf/optimizer/array_match.js
index 774361d0c4b7e..acee15d2b35b7 100644
--- a/jstests/cqf/array_match.js
+++ b/jstests/cqf/optimizer/array_match.js
@@ -1,10 +1,13 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_array_match;
@@ -79,4 +82,3 @@ assert.commandWorked(t.createIndex({b: 1, a: 1}));
assertValueOnPath("IndexScan", indexUnionNode, "children.1.nodeType");
assertValueOnPath(2, indexUnionNode, "children.1.interval.lowBound.bound.1.value");
}
-}());
diff --git a/jstests/cqf/array_size.js b/jstests/cqf/optimizer/array_size.js
similarity index 87%
rename from jstests/cqf/array_size.js
rename to jstests/cqf/optimizer/array_size.js
index 8f3b4fdf73307..9017e3466b74b 100644
--- a/jstests/cqf/array_size.js
+++ b/jstests/cqf/optimizer/array_size.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_array_size;
@@ -26,4 +24,3 @@ assert.eq(2, res.executionStats.nReturned);
res = t.explain("executionStats").aggregate([{$match: {'a.b': {$size: 2}}}]);
assert.eq(1, res.executionStats.nReturned);
-}());
diff --git a/jstests/cqf/basic_agg.js b/jstests/cqf/optimizer/basic_agg.js
similarity index 93%
rename from jstests/cqf/basic_agg.js
rename to jstests/cqf/optimizer/basic_agg.js
index 678d896c290a1..67a7e1a4c3677 100644
--- a/jstests/cqf/basic_agg.js
+++ b/jstests/cqf/optimizer/basic_agg.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_basic_index;
@@ -45,4 +46,3 @@ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
res = coll.explain("executionStats").aggregate([{$match: {$and: [{'a.b': 2}]}}]);
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
-}());
diff --git a/jstests/cqf/basic_agg_expr.js b/jstests/cqf/optimizer/basic_agg_expr.js
similarity index 97%
rename from jstests/cqf/basic_agg_expr.js
rename to jstests/cqf/optimizer/basic_agg_expr.js
index 0578717f5fdee..a55ce8de5fc26 100644
--- a/jstests/cqf/basic_agg_expr.js
+++ b/jstests/cqf/optimizer/basic_agg_expr.js
@@ -1,12 +1,9 @@
-(function() {
-"use strict";
-
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_agg_expr;
@@ -136,4 +133,3 @@ const t = db.cqf_agg_expr;
assertArrayEq({actual: res, expected: [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]});
}
}
-}());
diff --git a/jstests/cqf/basic_find.js b/jstests/cqf/optimizer/basic_find.js
similarity index 92%
rename from jstests/cqf/basic_find.js
rename to jstests/cqf/optimizer/basic_find.js
index f41e4b1b9f7f7..fd0ad385841b2 100644
--- a/jstests/cqf/basic_find.js
+++ b/jstests/cqf/optimizer/basic_find.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_basic_find;
@@ -45,4 +46,3 @@ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
res = coll.explain("executionStats").find({'': {$gt: 2}}).finish();
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-}());
diff --git a/jstests/cqf/basic_unwind.js b/jstests/cqf/optimizer/basic_unwind.js
similarity index 80%
rename from jstests/cqf/basic_unwind.js
rename to jstests/cqf/optimizer/basic_unwind.js
index 89f5c7ea5d8c0..0ed1064dc4b7e 100644
--- a/jstests/cqf/basic_unwind.js
+++ b/jstests/cqf/optimizer/basic_unwind.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_basic_unwind;
@@ -22,4 +23,3 @@ assert.commandWorked(coll.insert([
let res = coll.explain("executionStats").aggregate([{$unwind: '$x'}]);
assert.eq(4, res.executionStats.nReturned);
assertValueOnPlanPath("Unwind", res, "child.child.nodeType");
-}());
diff --git a/jstests/cqf/chess.js b/jstests/cqf/optimizer/chess.js
similarity index 76%
rename from jstests/cqf/chess.js
rename to jstests/cqf/optimizer/chess.js
index b210c52135423..47dee1f46453f 100644
--- a/jstests/cqf/chess.js
+++ b/jstests/cqf/optimizer/chess.js
@@ -1,10 +1,13 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_chess;
@@ -100,17 +103,14 @@ const res = coll.explain("executionStats").aggregate([
// TODO: verify expected results.
-// Verify we are getting an intersection between "minutes" and either "turns" or "avgRating".
-// The plan is currently not stable due to sampling.
-{
- const indexNode = navigateToPlanPath(res, "child.child.leftChild.leftChild");
- assertValueOnPath("IndexScan", indexNode, "nodeType");
- assertValueOnPath("minutes_1", indexNode, "indexDefName");
-}
-{
- const indexNode = navigateToPlanPath(res, "child.child.leftChild.rightChild.children.0.child");
- assertValueOnPath("IndexScan", indexNode, "nodeType");
- const indexName = navigateToPath(indexNode, "indexDefName");
- assert(indexName === "turns_1" || indexName === "avgRating_1");
-}
-}());
+/**
+ * Demonstrate the following:
+ * 1. Limit is subsumed into the collation node above.
+ * 2. We have one index scan on minutes and the range is between 2 and 150 (we can encode both
+ * comparisons as a single index scan).
+ */
+assertValueOnPlanPath("Collation", res, "child.nodeType");
+
+const indexNode = navigateToPlanPath(res, "child.child.leftChild");
+assertValueOnPath("IndexScan", indexNode, "nodeType");
+assertValueOnPath("minutes_1", indexNode, "indexDefName");
diff --git a/jstests/cqf/compond_index.js b/jstests/cqf/optimizer/compond_index.js
similarity index 96%
rename from jstests/cqf/compond_index.js
rename to jstests/cqf/optimizer/compond_index.js
index 526dd3c4eb0d9..4d757c63fac5d 100644
--- a/jstests/cqf/compond_index.js
+++ b/jstests/cqf/optimizer/compond_index.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled, runWithParams} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_compound_index;
@@ -91,4 +89,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1}));
() => t.explain("executionStats").aggregate([{$match: {a: {$gte: 1, $lte: 3}}}]));
assert.eq(30, res.executionStats.nReturned);
}
-}());
diff --git a/jstests/cqf/cost_model_override.js b/jstests/cqf/optimizer/cost_model_override.js
similarity index 89%
rename from jstests/cqf/cost_model_override.js
rename to jstests/cqf/optimizer/cost_model_override.js
index 5f03cbeeca272..cfef385899019 100644
--- a/jstests/cqf/cost_model_override.js
+++ b/jstests/cqf/optimizer/cost_model_override.js
@@ -3,13 +3,15 @@
* the cost of produced query plan changed.
*/
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cost_model_override;
@@ -50,5 +52,4 @@ function executeAndGetScanCost(scanIncrementalCost) {
const scanCost1 = executeAndGetScanCost(0.2);
const scanCost2 = executeAndGetScanCost(0.4);
-assert.lt(scanCost1, scanCost2);
-}());
+assert.lt(scanCost1, scanCost2);
diff --git a/jstests/cqf/count_optimize.js b/jstests/cqf/optimizer/count_optimize.js
similarity index 91%
rename from jstests/cqf/count_optimize.js
rename to jstests/cqf/optimizer/count_optimize.js
index 9ac7c14ea8f47..5597732780093 100644
--- a/jstests/cqf/count_optimize.js
+++ b/jstests/cqf/optimizer/count_optimize.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_count_optimize;
@@ -58,4 +60,3 @@ Filter []
PhysicalScan [{'a': evalTemp_0}, cqf_count_optimize_]
`;
assert.eq(expectedStr, removeUUIDsFromExplain(db, res));
-}());
diff --git a/jstests/cqf/optimizer/disjunction.js b/jstests/cqf/optimizer/disjunction.js
new file mode 100644
index 0000000000000..529d61bbbbc74
--- /dev/null
+++ b/jstests/cqf/optimizer/disjunction.js
@@ -0,0 +1,310 @@
+/**
+ * Test that $or is translated to a SargableNode, and executed with correct results.
+ */
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
+
+if (!checkCascadesOptimizerEnabled(db)) {
+ jsTestLog("Skipping test because the optimizer is not enabled");
+ quit();
+}
+
+const coll = db.cqf_disjunction;
+coll.drop();
+
+let docs = [];
+// Generate 1000 documents: 10 copies of each of the 100 distinct (a, b) pairs.
+for (let i = 0; i < 10; ++i) {
+ for (let a = 0; a < 10; ++a) {
+ for (let b = 0; b < 10; ++b) {
+ docs.push({a, b});
+ }
+ }
+}
+// Generate extra non-matching documents to discourage collection scan.
+for (let i = 0; i < 1000; ++i) {
+ docs.push({});
+}
+assert.commandWorked(coll.insert(docs));
+
+let result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray();
+assert.eq(result.length, 190, result);
+for (const doc of result) {
+    assert(doc.a === 2 || doc.b === 3,
+           `Query returned a doc not matching the predicate: ${tojson(doc)}`);
+}
+
+assert.commandWorked(coll.createIndexes([
+ {a: 1},
+ {b: 1},
+]));
+
+{
+ const query = {$or: [{a: 2}, {b: 3}]};
+
+ // Check the plan and count.
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => coll.explain("executionStats").find(query).finish());
+ assert.eq(190, res.executionStats.nReturned);
+
+ // We should get a union of two indexes {a:1} and {b:1}.
+ const expectedStr =
+ `Root [{scan_0}]
+NestedLoopJoin [joinType: Inner, {rid_1}]
+| | Const [true]
+| LimitSkip [limit: 1, skip: 0]
+| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_]
+Unique [{rid_1}]
+Union [{rid_1}]
+| IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: b_1, interval: {=Const [3]}]
+IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}]
+`;
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+
+ // Check the full result.
+ const result = coll.find(query).toArray();
+ assert.eq(result.length, 190, result);
+ for (const doc of result) {
+ assert(doc.a === 2 || doc.b === 3,
+ "Query returned a doc not matching the predicate: ${doc}");
+ }
+}
+
+// At time of writing, queries that compare to literal array or MinKey/MaxKey are translated to
+// an ABT with a disjunction in it.
+result = coll.find({arr: {$eq: [2]}}).toArray();
+assert.eq(result.length, 0, result);
+
+result = coll.find({arr: {$gt: MinKey()}}).toArray();
+assert.eq(result.length, docs.length, result);
+
+// Test a nested or/and where one leaf predicate ($exists) cannot be fully satisfied with index
+// bounds.
+result = coll.find({
+ $or: [
+ // 'b' exists on every doc so this should never happen.
+ {a: 5, b: {$exists: false}},
+ // The field 'nope' doesn't exist so this also shouldn't happen.
+ {nope: 'nope'},
+ ]
+ })
+ .toArray();
+assert.eq(result.length, 0, result);
+
+// Test that adding an $or predicate doesn't inhibit the use of index scan for other predicates.
+// The $or can just be a residual predicate.
+{
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => coll.explain("executionStats")
+ .find({a: 2, $or: [{b: 2}, {no_such_field: 123}]})
+ .finish());
+ assert.eq(10, res.executionStats.nReturned);
+
+ // We get an index scan on 'a' and some expression for the $or.
+ const expectedStr =
+ `Root [{scan_0}]
+Filter []
+| BinaryOp [Or]
+| | EvalFilter []
+| | | Variable [scan_0]
+| | PathGet [no_such_field]
+| | PathTraverse [1]
+| | PathCompare [Eq]
+| | Const [123]
+| EvalFilter []
+| | Variable [scan_0]
+| PathGet [b]
+| PathCompare [Eq]
+| Const [2]
+NestedLoopJoin [joinType: Inner, {rid_1}]
+| | Const [true]
+| LimitSkip [limit: 1, skip: 0]
+| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_]
+IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}]
+`;
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+}
+
+// Test that an $or containing multiple predicates on the same field groups the predicates under
+// the shared field.
+{
+ const params = [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true},
+ {key: "internalCascadesOptimizerDisableIndexes", value: true}
+ ];
+
+ //
+ // Test $or where all predicates are on the same field.
+ //
+ let res = runWithParams(
+ params,
+ () => coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {a: 3}]}).finish());
+
+ let expectedStr =
+ `Root [{scan_0}]
+Filter []
+| EvalFilter []
+| | Variable [evalTemp_0]
+| PathTraverse [1]
+| PathCompare [EqMember]
+| Const [[1, 2, 3]]
+PhysicalScan [{'': scan_0, 'a': evalTemp_0}, cqf_disjunction_]
+`;
+ assert.eq(300, res.executionStats.nReturned);
+ let actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+
+ // The same query, but with nested $ors.
+ res = runWithParams(
+ params,
+ () =>
+ coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {a: 2}, {a: 3}]}]}).finish());
+ assert.eq(300, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+
+ res = runWithParams(
+ params,
+ () =>
+ coll.explain("executionStats").find({$or: [{a: 1}, {$or: [{a: 2}, {a: 3}]}]}).finish());
+ assert.eq(300, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+
+ //
+ // Test $or where two predicates are on the same field and one is on a different field.
+ //
+ res = runWithParams(
+ params,
+ () => coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {b: 3}]}).finish());
+
+ expectedStr =
+ `Root [{scan_0}]
+Filter []
+| BinaryOp [Or]
+| | EvalFilter []
+| | | Variable [evalTemp_1]
+| | PathTraverse [1]
+| | PathCompare [Eq]
+| | Const [3]
+| EvalFilter []
+| | Variable [evalTemp_0]
+| PathTraverse [1]
+| PathCompare [EqMember]
+| Const [[1, 2]]
+PhysicalScan [{'': scan_0, 'a': evalTemp_0, 'b': evalTemp_1}, cqf_disjunction_]
+`;
+ assert.eq(280, res.executionStats.nReturned);
+ actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+
+ // The same query, but with nested $ors.
+ res = runWithParams(
+ params,
+ () =>
+ coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}).finish());
+ assert.eq(280, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+
+ res = runWithParams(
+ params,
+ () =>
+ coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {b: 3}]}, {a: 2}]}).finish());
+ assert.eq(280, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+
+ //
+ // Test $or where two predicates are on one field and two predicates are on another.
+ //
+ res = runWithParams(
+ params,
+ () =>
+ coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {b: 3}, {b: 4}]}).finish());
+
+ expectedStr =
+ `Root [{scan_0}]
+Filter []
+| BinaryOp [Or]
+| | EvalFilter []
+| | | Variable [evalTemp_1]
+| | PathTraverse [1]
+| | PathCompare [EqMember]
+| | Const [[3, 4]]
+| EvalFilter []
+| | Variable [evalTemp_0]
+| PathTraverse [1]
+| PathCompare [EqMember]
+| Const [[1, 2]]
+PhysicalScan [{'': scan_0, 'a': evalTemp_0, 'b': evalTemp_1}, cqf_disjunction_]
+`;
+ assert.eq(360, res.executionStats.nReturned);
+ actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+
+ // The same query, but with nested $ors.
+    res = runWithParams(params,
+                        () => coll.explain("executionStats")
+                                  .find({$or: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]})
+                                  .finish());
+ assert.eq(360, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+
+    res = runWithParams(params,
+                        () => coll.explain("executionStats")
+                                  .find({$or: [{$or: [{a: 1}, {b: 4}]}, {$or: [{b: 3}, {a: 2}]}]})
+                                  .finish());
+ assert.eq(360, res.executionStats.nReturned);
+ assert.eq(expectedStr, actualStr);
+}
+
+// Test a union involving multikey indexes.
+// First make {a:1} and {b:1} multikey.
+assert.commandWorked(coll.insert({a: ['asdf'], b: ['qwer']}));
+{
+ const query = {$or: [{a: 2}, {b: 3}]};
+
+ // Check the plan and count.
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => coll.explain("executionStats").find(query).finish());
+ assert.eq(190, res.executionStats.nReturned);
+
+ // We should get a union of two indexes {a:1} and {b:1}.
+ // Neither one needs its own Unique stage, because we have to have a Unique after the Union
+ // anyway.
+ const expectedStr =
+ `Root [{scan_0}]
+NestedLoopJoin [joinType: Inner, {rid_1}]
+| | Const [true]
+| LimitSkip [limit: 1, skip: 0]
+| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_]
+Unique [{rid_1}]
+Union [{rid_1}]
+| IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: b_1, interval: {=Const [3]}]
+IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}]
+`;
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+
+ // Check the full result.
+ const result = coll.find(query).toArray();
+ assert.eq(result.length, 190, result);
+ for (const doc of result) {
+ assert(doc.a === 2 || doc.b === 3,
+ "Query returned a doc not matching the predicate: ${doc}");
+ }
+}
diff --git a/jstests/cqf/elemmatch_bounds.js b/jstests/cqf/optimizer/elemmatch_bounds.js
similarity index 98%
rename from jstests/cqf/elemmatch_bounds.js
rename to jstests/cqf/optimizer/elemmatch_bounds.js
index 09e9a44c45a39..4e17c45429a60 100644
--- a/jstests/cqf/elemmatch_bounds.js
+++ b/jstests/cqf/optimizer/elemmatch_bounds.js
@@ -7,10 +7,7 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton.
+import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js";
const coll = db.cqf_elemmatch_bounds;
coll.drop();
@@ -204,5 +201,4 @@ result = run({
assertCount(result, numDuplicates, {c: [[1, 2, 3]]});
assertCount(result, numDuplicates, {c: [[1], [2], [3]]});
assertCount(result, numDuplicates, {c: [[2]]});
-assert.eq(result.length, numDuplicates * 3);
-})();
+assert.eq(result.length, numDuplicates * 3);
diff --git a/jstests/cqf/empty_results.js b/jstests/cqf/optimizer/empty_results.js
similarity index 76%
rename from jstests/cqf/empty_results.js
rename to jstests/cqf/optimizer/empty_results.js
index 05fd8b24fa880..7c97a3d71f74d 100644
--- a/jstests/cqf/empty_results.js
+++ b/jstests/cqf/optimizer/empty_results.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_empty_results;
@@ -15,4 +16,3 @@ assert.commandWorked(t.insert([{a: 1}, {a: 2}]));
const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}, {$limit: 1}, {$skip: 10}]);
assert.eq(0, res.executionStats.nReturned);
assertValueOnPlanPath("CoScan", res, "child.child.child.nodeType");
-}());
diff --git a/jstests/cqf/explain_test.js b/jstests/cqf/optimizer/explain_test.js
similarity index 87%
rename from jstests/cqf/explain_test.js
rename to jstests/cqf/optimizer/explain_test.js
index 2c24cf32c2114..574df54a027cf 100644
--- a/jstests/cqf/explain_test.js
+++ b/jstests/cqf/optimizer/explain_test.js
@@ -1,11 +1,12 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_explain_test;
@@ -40,4 +41,3 @@ PhysicalScan [{'': scan_0, 'a': evalTemp_2, 'b': evalTemp_3}, cqf_explain_
const actualStr = removeUUIDsFromExplain(db, res);
assert.eq(expectedStr, actualStr);
-}());
diff --git a/jstests/cqf/filter_order.js b/jstests/cqf/optimizer/filter_order.js
similarity index 82%
rename from jstests/cqf/filter_order.js
rename to jstests/cqf/optimizer/filter_order.js
index e33e45c661eb5..2f094be4f90f7 100644
--- a/jstests/cqf/filter_order.js
+++ b/jstests/cqf/optimizer/filter_order.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_filter_order;
@@ -19,4 +17,3 @@ assert.commandWorked(bulk.execute());
let res = coll.aggregate([{$match: {'a': {$eq: 1}, 'b': {$eq: 1}, 'c': {$eq: 1}}}]).toArray();
// TODO: verify plan that predicate on "c" is applied first (most selective), then "b", then "a".
-}());
diff --git a/jstests/cqf/find_limit_skip.js b/jstests/cqf/optimizer/find_limit_skip.js
similarity index 100%
rename from jstests/cqf/find_limit_skip.js
rename to jstests/cqf/optimizer/find_limit_skip.js
diff --git a/jstests/cqf/find_sort.js b/jstests/cqf/optimizer/find_sort.js
similarity index 87%
rename from jstests/cqf/find_sort.js
rename to jstests/cqf/optimizer/find_sort.js
index 53d281dba0d59..f2b179808890a 100644
--- a/jstests/cqf/find_sort.js
+++ b/jstests/cqf/optimizer/find_sort.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_find_sort;
@@ -38,4 +40,3 @@ assert.eq(numResults, res.executionStats.nReturned);
const indexScanNode = navigateToPlanPath(res, "child.child.child.leftChild.child.child");
assertValueOnPath("IndexScan", indexScanNode, "nodeType");
assertValueOnPath(5, indexScanNode, "interval.highBound.bound.0.value");
-}());
diff --git a/jstests/cqf/group.js b/jstests/cqf/optimizer/group.js
similarity index 82%
rename from jstests/cqf/group.js
rename to jstests/cqf/optimizer/group.js
index 7979a72f2eee8..814937a38ef01 100644
--- a/jstests/cqf/group.js
+++ b/jstests/cqf/optimizer/group.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_group;
@@ -24,4 +25,3 @@ const res = coll.explain("executionStats").aggregate([
]);
assertValueOnPlanPath("GroupBy", res, "child.child.nodeType");
assert.eq(4, res.executionStats.nReturned);
-}());
diff --git a/jstests/cqf/optimizer/index_hints.js b/jstests/cqf/optimizer/index_hints.js
new file mode 100644
index 0000000000000..70dc6a2ba5ae3
--- /dev/null
+++ b/jstests/cqf/optimizer/index_hints.js
@@ -0,0 +1,167 @@
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams,
+} from "jstests/libs/optimizer_utils.js";
+
+if (!checkCascadesOptimizerEnabled(db)) {
+ jsTestLog("Skipping test because the optimizer is not enabled");
+ quit();
+}
+
+const t = db.cqf_index_hints;
+t.drop();
+
+assert.commandWorked(t.insert({_id: 0, b: 0, a: [1, 2, 3, 4]}));
+assert.commandWorked(t.insert({_id: 1, b: 1, a: [2, 3, 4]}));
+assert.commandWorked(t.insert({_id: 2, b: 2, a: [2]}));
+assert.commandWorked(t.insert({_id: 3, b: 3, a: 2}));
+assert.commandWorked(t.insert({_id: 4, b: 4, a: [1, 3]}));
+
+assert.commandWorked(t.createIndex({a: 1}));
+assert.commandWorked(t.createIndex({b: 1}));
+
+// There are too few documents, and an index is not preferable.
+{
+ let res = t.explain("executionStats").find({a: 2}).finish();
+ assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish();
+ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish();
+ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish();
+ assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
+
+ res = t.find({a: 2}).hint({$natural: 1}).toArray();
+ assert.eq(res[0]._id, 0, res);
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish();
+ assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
+
+ res = t.find({a: 2}).hint({$natural: -1}).toArray();
+ assert.eq(res[0]._id, 3, res);
+}
+
+// Generate enough documents for index to be preferable.
+for (let i = 0; i < 100; i++) {
+ assert.commandWorked(t.insert({b: i + 5, a: i + 10}));
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).finish();
+ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish();
+ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish();
+ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
+}
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish();
+ assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
+
+ res = t.find({a: 2}).hint({$natural: 1}).toArray();
+ assert.eq(res[0]._id, 0, res);
+}
+
+{
+ let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish();
+ assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
+
+ res = t.find({a: 2}).hint({$natural: -1}).toArray();
+ assert.eq(res[0]._id, 3, res);
+}
+
+// Use index {a: 1} multikeyness info; we cannot eliminate the PathTraverse.
+{
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish());
+
+ const expectedStr =
+ `Root [{scan_0}]
+Filter []
+| EvalFilter []
+| | Variable [evalTemp_0]
+| PathTraverse [1]
+| PathCompare [Eq]
+| Const [2]
+PhysicalScan [{'': scan_0, 'a': evalTemp_0}, cqf_index_hints_]
+`;
+
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+}
+
+// Hint a collection scan to disable indexes. Check that the multikeyness info of index {b: 1} can
+// eliminate the PathTraverse.
+{
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => t.explain("executionStats").find({b: 2}).hint({$natural: -1}).finish());
+
+ const expectedStr =
+ `Root [{scan_0}]
+Filter []
+| EvalFilter []
+| | Variable [evalTemp_0]
+| PathCompare [Eq]
+| Const [2]
+PhysicalScan [{'': scan_0, 'b': evalTemp_0}, cqf_index_hints_]
+`;
+
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+}
+
+// Hint index {a: 1} to disable index {b: 1}. Check that the multikeyness info of index {b: 1} can
+// eliminate the PathTraverse.
+{
+ const res = runWithParams(
+ [
+ {key: 'internalCascadesOptimizerExplainVersion', value: "v2"},
+ {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}
+ ],
+ () => t.explain("executionStats").find({a: {$gt: 0}, b: 2}).hint("a_1").finish());
+
+ const expectedStr =
+ `Root [{scan_0}]
+NestedLoopJoin [joinType: Inner, {rid_1}]
+| | Const [true]
+| Filter []
+| | EvalFilter []
+| | | Variable [evalTemp_4]
+| | PathCompare [Eq]
+| | Const [2]
+| LimitSkip [limit: 1, skip: 0]
+| Seek [ridProjection: rid_1, {'': scan_0, 'b': evalTemp_4}, cqf_index_hints_]
+Unique [{rid_1}]
+IndexScan [{'': rid_1}, scanDefName: cqf_index_hints_, indexDefName: a_1, interval: {(Const [0], Const [""])}]
+`;
+
+ const actualStr = removeUUIDsFromExplain(db, res);
+ assert.eq(expectedStr, actualStr);
+}
diff --git a/jstests/cqf/index_intersect.js b/jstests/cqf/optimizer/index_intersect.js
similarity index 90%
rename from jstests/cqf/index_intersect.js
rename to jstests/cqf/optimizer/index_intersect.js
index 8d89703cfb88e..29543e38aa912 100644
--- a/jstests/cqf/index_intersect.js
+++ b/jstests/cqf/optimizer/index_intersect.js
@@ -1,10 +1,13 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+ runWithParams,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_index_intersect;
@@ -18,7 +21,7 @@ for (let i = 0; i < nMatches; i++) {
documents.push({a: 4, b: 3, c: 2});
documents.push({a: 5, b: 5, c: 2});
-for (let i = 1; i < nMatches + 500; i++) {
+for (let i = 1; i < nMatches + 1000; i++) {
documents.push({a: i + nMatches, b: i + nMatches, c: i + nMatches});
}
@@ -53,4 +56,3 @@ joinNode = navigateToPlanPath(res, "child.leftChild");
assertValueOnPath("HashJoin", joinNode, "nodeType");
assertValueOnPath("IndexScan", joinNode, "leftChild.nodeType");
assertValueOnPath("IndexScan", joinNode, "rightChild.children.0.child.nodeType");
-}());
diff --git a/jstests/cqf/index_intersect1.js b/jstests/cqf/optimizer/index_intersect1.js
similarity index 89%
rename from jstests/cqf/index_intersect1.js
rename to jstests/cqf/optimizer/index_intersect1.js
index 7c602dcb695c4..4c3d0b1704549 100644
--- a/jstests/cqf/index_intersect1.js
+++ b/jstests/cqf/optimizer/index_intersect1.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_index_intersect1;
@@ -32,4 +33,3 @@ res = t.explain("executionStats")
.aggregate([{$project: {'_id': 0, 'a': 1}}, {$match: {'a': {$gt: 60, $lt: 100}}}]);
assert.eq(2, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.child.nodeType");
-}());
\ No newline at end of file
diff --git a/jstests/cqf/index_subfield.js b/jstests/cqf/optimizer/index_subfield.js
similarity index 86%
rename from jstests/cqf/index_subfield.js
rename to jstests/cqf/optimizer/index_subfield.js
index edb8146e3b951..70b56a643cb1e 100644
--- a/jstests/cqf/index_subfield.js
+++ b/jstests/cqf/optimizer/index_subfield.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_index_subfield;
@@ -29,4 +30,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1}));
t.explain("executionStats").find({a: 2, 'b.c': 3}, {_id: 0, a: 1}).hint("a_1_b_1").finish();
assertValueOnPlanPath("IndexScan", res, "child.child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/index_with_null.js b/jstests/cqf/optimizer/index_with_null.js
similarity index 90%
rename from jstests/cqf/index_with_null.js
rename to jstests/cqf/optimizer/index_with_null.js
index aa4125825f527..b6bd29ca565cb 100644
--- a/jstests/cqf/index_with_null.js
+++ b/jstests/cqf/optimizer/index_with_null.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_index_with_null;
@@ -45,4 +46,3 @@ t.createIndex({c: 1});
// Verify the query **is covered** by the index.
assertValueOnPlanPath("IndexScan", res, "child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/lookup.js b/jstests/cqf/optimizer/lookup.js
similarity index 96%
rename from jstests/cqf/lookup.js
rename to jstests/cqf/optimizer/lookup.js
index 0b633766bf764..353bc0bf63087 100644
--- a/jstests/cqf/lookup.js
+++ b/jstests/cqf/optimizer/lookup.js
@@ -1,14 +1,16 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
+
+load("jstests/aggregation/extras/utils.js");
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
-load("jstests/aggregation/extras/utils.js");
-
const collA = db.collA;
collA.drop();
@@ -110,4 +112,3 @@ try {
assert.commandWorked(
db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'}));
}
-}());
diff --git a/jstests/cqf/match_expr.js b/jstests/cqf/optimizer/match_expr.js
similarity index 90%
rename from jstests/cqf/match_expr.js
rename to jstests/cqf/optimizer/match_expr.js
index ed2ffcdb4268c..7bcbc2452c274 100644
--- a/jstests/cqf/match_expr.js
+++ b/jstests/cqf/optimizer/match_expr.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_match_expr;
@@ -46,4 +47,3 @@ const numExpected = 1 * 5 * 5 + 4 * 1 * 1;
assertValueOnPlanPath("Filter", res, "child.nodeType");
assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/match_with_exists.js b/jstests/cqf/optimizer/match_with_exists.js
similarity index 100%
rename from jstests/cqf/match_with_exists.js
rename to jstests/cqf/optimizer/match_with_exists.js
diff --git a/jstests/cqf/match_with_in.js b/jstests/cqf/optimizer/match_with_in.js
similarity index 97%
rename from jstests/cqf/match_with_in.js
rename to jstests/cqf/optimizer/match_with_in.js
index ac52430ece6eb..73418c3a2dcc3 100644
--- a/jstests/cqf/match_with_in.js
+++ b/jstests/cqf/optimizer/match_with_in.js
@@ -3,10 +3,7 @@
*/
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
-load('jstests/libs/optimizer_utils.js');
-
-(function() {
-"use strict";
+import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js";
const coll = db.cqf_match_with_in;
coll.drop();
@@ -84,4 +81,3 @@ const tests = [
for (const testData of tests) {
runTest(testData.filter, testData.expected);
}
-}());
diff --git a/jstests/cqf/optimizer/no_collection.js b/jstests/cqf/optimizer/no_collection.js
new file mode 100644
index 0000000000000..9e6d0b88e8f82
--- /dev/null
+++ b/jstests/cqf/optimizer/no_collection.js
@@ -0,0 +1,12 @@
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
+
+if (!checkCascadesOptimizerEnabled(db)) {
+ jsTestLog("Skipping test because the optimizer is not enabled");
+ quit();
+}
+
+let t = db.cqf_no_collection;
+t.drop();
+
+const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}]);
+assert.eq(0, res.executionStats.nReturned);
diff --git a/jstests/cqf/nonselective_index.js b/jstests/cqf/optimizer/nonselective_index.js
similarity index 83%
rename from jstests/cqf/nonselective_index.js
rename to jstests/cqf/optimizer/nonselective_index.js
index 56ba933c0bada..52c8384cd7c97 100644
--- a/jstests/cqf/nonselective_index.js
+++ b/jstests/cqf/optimizer/nonselective_index.js
@@ -1,13 +1,14 @@
/**
* Tests scenario related to SERVER-13065.
*/
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_nonselective_index;
@@ -26,5 +27,4 @@ assert.commandWorked(t.createIndex({a: 1}));
const res = t.explain("executionStats").aggregate([{$match: {a: {$gte: 0}}}]);
assert.eq(nDocs, res.executionStats.nReturned);
-assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
-}());
\ No newline at end of file
+assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType");
\ No newline at end of file
diff --git a/jstests/cqf/not_expr.js b/jstests/cqf/optimizer/not_expr.js
similarity index 97%
rename from jstests/cqf/not_expr.js
rename to jstests/cqf/optimizer/not_expr.js
index 980bc671c1aae..3b4233a96508d 100644
--- a/jstests/cqf/not_expr.js
+++ b/jstests/cqf/optimizer/not_expr.js
@@ -1,8 +1,5 @@
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/optimizer_utils.js"); // For assertValueOnPlanPath.
+import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js";
const c = db.cqf_not;
c.drop();
@@ -270,4 +267,3 @@ assertArrayEq({
{a: [[3, 3], [3, 3]]},
],
});
-}());
diff --git a/jstests/cqf/null_missing.js b/jstests/cqf/optimizer/null_missing.js
similarity index 87%
rename from jstests/cqf/null_missing.js
rename to jstests/cqf/optimizer/null_missing.js
index 2797fc66e0ced..0b6e0c5a6b389 100644
--- a/jstests/cqf/null_missing.js
+++ b/jstests/cqf/optimizer/null_missing.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_null_missing;
@@ -32,4 +33,3 @@ assert.commandWorked(t.createIndex({'a.b': 1}));
assert.eq(3, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
}
-}());
diff --git a/jstests/cqf/object_elemMatch.js b/jstests/cqf/optimizer/object_elemMatch.js
similarity index 90%
rename from jstests/cqf/object_elemMatch.js
rename to jstests/cqf/optimizer/object_elemMatch.js
index e1baf046e9b93..dbfd868fed4a5 100644
--- a/jstests/cqf/object_elemMatch.js
+++ b/jstests/cqf/optimizer/object_elemMatch.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_object_elemMatch;
@@ -35,4 +36,3 @@ assert.commandWorked(t.insert({a: [{"": [1, 2], c: [3, 4]}]}));
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("PhysicalScan", res, "child.child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/partial_index.js b/jstests/cqf/optimizer/partial_index.js
similarity index 88%
rename from jstests/cqf/partial_index.js
rename to jstests/cqf/optimizer/partial_index.js
index ba1993a318cd1..fe708e4e72e01 100644
--- a/jstests/cqf/partial_index.js
+++ b/jstests/cqf/optimizer/partial_index.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_partial_index;
@@ -30,4 +28,3 @@ assert.eq(1, res.length);
// TODO: verify with explain the plan should not use the index.
res = t.aggregate([{$match: {'a': 3, 'b': 3}}]).toArray();
assert.eq(2, res.length);
-}());
\ No newline at end of file
diff --git a/jstests/cqf/project_expr_dependency.js b/jstests/cqf/optimizer/project_expr_dependency.js
similarity index 84%
rename from jstests/cqf/project_expr_dependency.js
rename to jstests/cqf/optimizer/project_expr_dependency.js
index 0c3d1510d1b35..806e46abe555f 100644
--- a/jstests/cqf/project_expr_dependency.js
+++ b/jstests/cqf/optimizer/project_expr_dependency.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled, navigateToPlanPath} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_project_expr_dependency;
@@ -23,4 +21,3 @@ const res = t.explain("executionStats").aggregate([
// Demonstrate we only need to read "b1" and "c1" from the collection.
const scanNodeProjFieldMap = navigateToPlanPath(res, "child.child.fieldProjectionMap");
assert.eq(["b1", "c1"], Object.keys(scanNodeProjFieldMap));
-}());
diff --git a/jstests/cqf/projection.js b/jstests/cqf/optimizer/projection.js
similarity index 99%
rename from jstests/cqf/projection.js
rename to jstests/cqf/optimizer/projection.js
index 9bd12a477866e..841ca8ba942d4 100644
--- a/jstests/cqf/projection.js
+++ b/jstests/cqf/optimizer/projection.js
@@ -3,10 +3,8 @@
* Many of these tests are similar/repeats of core/projection_semantics.js
*/
-(function() {
-"use strict";
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
-load('jstests/libs/optimizer_utils.js');
+import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js";
const coll = db.cqf_project;
@@ -187,5 +185,4 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes
[{_id: 0, b: {c: 1, d: 1}}, {_id: 1, b: {c: 2, d: 2}}, {_id: 2, b: {c: 3, d: 3}}],
interestingIndexes: [],
});
-}());
-}());
+}());
\ No newline at end of file
diff --git a/jstests/cqf/range_descending.js b/jstests/cqf/optimizer/range_descending.js
similarity index 97%
rename from jstests/cqf/range_descending.js
rename to jstests/cqf/optimizer/range_descending.js
index 4c8fe22f9fc3c..fd790f5905d19 100644
--- a/jstests/cqf/range_descending.js
+++ b/jstests/cqf/optimizer/range_descending.js
@@ -7,10 +7,8 @@
* behavior, the index bounds are swapped when the corresponding index is descending.
*/
-(function() {
-"use strict";
+import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js");
const coll = db.cqf_range_descending;
/*
* This is the most basic case: a single range predicate with a descending index.
@@ -84,4 +82,3 @@ const coll = db.cqf_range_descending;
assertValueOnPlanPath("IndexScan", res, "child.child.leftChild.nodeType");
}
}
-}());
diff --git a/jstests/cqf/recursive_ix_nav.js b/jstests/cqf/optimizer/recursive_ix_nav.js
similarity index 87%
rename from jstests/cqf/recursive_ix_nav.js
rename to jstests/cqf/optimizer/recursive_ix_nav.js
index 529ec9d7eac22..8b9f59bd4fd1e 100644
--- a/jstests/cqf/recursive_ix_nav.js
+++ b/jstests/cqf/optimizer/recursive_ix_nav.js
@@ -1,10 +1,13 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+ runWithParams,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_recursive_ix_nav;
@@ -152,7 +155,11 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1}));
[
{key: "internalCascadesOptimizerMinIndexEqPrefixes", value: 2},
{key: "internalCascadesOptimizerMaxIndexEqPrefixes", value: 2},
- {key: "internalCascadesOptimizerDisableScan", value: true}
+ {key: "internalCascadesOptimizerDisableScan", value: true},
+ // Make Seek very expensive to discourage plans where we satisfy some predicates after
+ // the fetch. We want to test the plan where a,c,e predicates are all satisfied on the
+ // index side: a,c as equality prefixes and e as residual.
+ {key: 'internalCostModelCoefficients', value: {"seekStartupCost": 1e6 + 0.1}}
],
() => t.explain("executionStats").aggregate([
{
@@ -170,12 +177,18 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1}));
// Assert we have two spool producers, one for each interval for "a" ([1, 3] and [6, 6]).
assertValueOnPlanPath(
- "SpoolProducer", res, "child.child.leftChild.child.children.0.leftChild.nodeType");
- assertValueOnPlanPath(7, res, "child.child.leftChild.child.children.0.leftChild.id");
+ "SpoolProducer", res, "child.child.child.children.0.child.children.0.leftChild.nodeType");
+ const leftNode =
+ navigateToPlanPath(res, 'child.child.child.children.0.child.children.0.leftChild');
assertValueOnPlanPath(
- "SpoolProducer", res, "child.child.leftChild.child.children.1.leftChild.nodeType");
- assertValueOnPlanPath(8, res, "child.child.leftChild.child.children.1.leftChild.id");
+ "SpoolProducer", res, "child.child.child.children.0.child.children.1.leftChild.nodeType");
+ const rightNode =
+ navigateToPlanPath(res, 'child.child.child.children.0.child.children.1.leftChild');
+
+ assert.neq(leftNode.id,
+ rightNode.id,
+ `Expected different spool ids: ${tojson({leftNode, rightNode})}`);
}
{
@@ -209,4 +222,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1}));
assertValueOnPlanPath("IndexScan", res, "child.leftChild.rightChild.nodeType");
assertValueOnPlanPath(false, res, "child.leftChild.rightChild.reversed");
}
-}());
diff --git a/jstests/cqf/redundant_conditions.js b/jstests/cqf/optimizer/redundant_conditions.js
similarity index 87%
rename from jstests/cqf/redundant_conditions.js
rename to jstests/cqf/optimizer/redundant_conditions.js
index 75db76e4e3011..3593969321d44 100644
--- a/jstests/cqf/redundant_conditions.js
+++ b/jstests/cqf/optimizer/redundant_conditions.js
@@ -1,13 +1,15 @@
/**
* Tests scenario related to SERVER-22857.
*/
-(function() {
-"use strict";
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_redundant_condition;
@@ -39,5 +41,4 @@ PhysicalScan [{'': scan_0, 'a': evalTemp_2}, cqf_redundant_condition_]
`;
const actualStr = removeUUIDsFromExplain(db, res);
assert.eq(expectedStr, actualStr);
-}
-}());
+}
\ No newline at end of file
diff --git a/jstests/cqf/residual_pred_costing.js b/jstests/cqf/optimizer/residual_pred_costing.js
similarity index 87%
rename from jstests/cqf/residual_pred_costing.js
rename to jstests/cqf/optimizer/residual_pred_costing.js
index 387c5804b0a0c..6c98749265e26 100644
--- a/jstests/cqf/residual_pred_costing.js
+++ b/jstests/cqf/optimizer/residual_pred_costing.js
@@ -1,13 +1,16 @@
/**
* Tests scenario related to SERVER-21697.
*/
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+ runWithParams,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_residual_pred_costing;
@@ -33,5 +36,4 @@ assert.eq(nDocs * 0.1, res.executionStats.nReturned);
// Demonstrate we can pick the indexing covering most fields.
const indexNode = navigateToPlanPath(res, "child.leftChild");
assertValueOnPath("IndexScan", indexNode, "nodeType");
-assertValueOnPath("a_1_b_1_c_1_d_1", indexNode, "indexDefName");
-}());
+assertValueOnPath("a_1_b_1_c_1_d_1", indexNode, "indexDefName");
\ No newline at end of file
diff --git a/jstests/cqf/sampling.js b/jstests/cqf/optimizer/sampling.js
similarity index 86%
rename from jstests/cqf/sampling.js
rename to jstests/cqf/optimizer/sampling.js
index 37dd0ae0e4411..fdd91cc17dd90 100644
--- a/jstests/cqf/sampling.js
+++ b/jstests/cqf/optimizer/sampling.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_sampling;
@@ -28,4 +26,3 @@ const props = res.queryPlanner.winningPlan.optimizerPlan.properties;
// Verify the winning plan cardinality is within roughly 25% of the expected documents.
assert.lt(nDocs * 0.2 * 0.75, props.adjustedCE);
assert.gt(nDocs * 0.2 * 1.25, props.adjustedCE);
-}());
diff --git a/jstests/cqf/selective_index.js b/jstests/cqf/optimizer/selective_index.js
similarity index 81%
rename from jstests/cqf/selective_index.js
rename to jstests/cqf/optimizer/selective_index.js
index 42113655472be..7e482658c29b7 100644
--- a/jstests/cqf/selective_index.js
+++ b/jstests/cqf/optimizer/selective_index.js
@@ -1,13 +1,15 @@
/**
* Tests scenario related to SERVER-20616.
*/
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_selective_index;
@@ -30,5 +32,4 @@ assert.eq(1, res.executionStats.nReturned);
// Demonstrate we can pick index on "b".
const indexNode = navigateToPlanPath(res, "child.leftChild");
assertValueOnPath("IndexScan", indexNode, "nodeType");
-assertValueOnPath("b_1", indexNode, "indexDefName");
-}());
\ No newline at end of file
+assertValueOnPath("b_1", indexNode, "indexDefName");
\ No newline at end of file
diff --git a/jstests/cqf/sort.js b/jstests/cqf/optimizer/sort.js
similarity index 81%
rename from jstests/cqf/sort.js
rename to jstests/cqf/optimizer/sort.js
index 6d03b95c850df..f79fb45bc396c 100644
--- a/jstests/cqf/sort.js
+++ b/jstests/cqf/optimizer/sort.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort;
@@ -19,4 +17,3 @@ assert.commandWorked(t.insert({_id: 6, x: 4}));
const res = t.aggregate([{$unwind: '$x'}, {$sort: {'x': 1}}]).toArray();
assert.eq(4, res.length);
-}());
\ No newline at end of file
diff --git a/jstests/cqf/sort1.js b/jstests/cqf/optimizer/sort1.js
similarity index 89%
rename from jstests/cqf/sort1.js
rename to jstests/cqf/optimizer/sort1.js
index fce60b2f4ae43..335b5e8a70842 100644
--- a/jstests/cqf/sort1.js
+++ b/jstests/cqf/optimizer/sort1.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort1;
@@ -34,4 +32,3 @@ t.createIndex(index);
const resIndexScan = t.find(query).hint(index).sort({_id: 1}).toArray();
assert.eq(resCollScan, resIndexScan);
}
-}());
\ No newline at end of file
diff --git a/jstests/cqf/sort2.js b/jstests/cqf/optimizer/sort2.js
similarity index 80%
rename from jstests/cqf/sort2.js
rename to jstests/cqf/optimizer/sort2.js
index 7961d82320dea..5023808b407b4 100644
--- a/jstests/cqf/sort2.js
+++ b/jstests/cqf/optimizer/sort2.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort2;
@@ -22,4 +20,3 @@ t.createIndex({a: 1});
const resIndexScan = t.find({a: {$gte: 1}}).sort({a: -1}).hint({a: 1}).toArray();
assert.eq(resCollScan, resIndexScan);
}
-}());
\ No newline at end of file
diff --git a/jstests/cqf/sort3.js b/jstests/cqf/optimizer/sort3.js
similarity index 82%
rename from jstests/cqf/sort3.js
rename to jstests/cqf/optimizer/sort3.js
index 66a23b6beba10..59590cf48795c 100644
--- a/jstests/cqf/sort3.js
+++ b/jstests/cqf/optimizer/sort3.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort3;
@@ -23,4 +21,3 @@ t.createIndex({a: 1, b: -1});
const resIndexScan = t.find({a: {$gte: 2}}).sort({a: 1, b: -1}).hint({a: 1, b: -1}).toArray();
assert.eq(resCollScan, resIndexScan);
}
-}());
\ No newline at end of file
diff --git a/jstests/cqf/sort_compound_pred.js b/jstests/cqf/optimizer/sort_compound_pred.js
similarity index 94%
rename from jstests/cqf/sort_compound_pred.js
rename to jstests/cqf/optimizer/sort_compound_pred.js
index a983890b36eb2..f4cfd8b90162f 100644
--- a/jstests/cqf/sort_compound_pred.js
+++ b/jstests/cqf/optimizer/sort_compound_pred.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort_compound_pred;
@@ -72,4 +74,3 @@ Union [{disjunction_0, rid_1}]
IndexScan [{' 0': disjunction_0, '': rid_1}, scanDefName: cqf_sort_compound_pred_, indexDefName: a_1, interval: {=Const [1]}]
`);
}
-}());
diff --git a/jstests/cqf/sort_match.js b/jstests/cqf/optimizer/sort_match.js
similarity index 89%
rename from jstests/cqf/sort_match.js
rename to jstests/cqf/optimizer/sort_match.js
index 4d53ec5f66925..55a5ab4f219f0 100644
--- a/jstests/cqf/sort_match.js
+++ b/jstests/cqf/optimizer/sort_match.js
@@ -1,13 +1,16 @@
/**
* Tests scenario related to SERVER-12923.
*/
-(function() {
-"use strict";
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath,
+ runWithParams,
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sort_match;
@@ -49,5 +52,4 @@ assert.commandWorked(t.createIndex({b: 1}));
}
prev = current;
}
-}
-}());
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/jstests/cqf/sort_project.js b/jstests/cqf/optimizer/sort_project.js
similarity index 96%
rename from jstests/cqf/sort_project.js
rename to jstests/cqf/optimizer/sort_project.js
index c3cd5b2b9dbac..5b413c87fc2ab 100644
--- a/jstests/cqf/sort_project.js
+++ b/jstests/cqf/optimizer/sort_project.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
var coll = db.cqf_testCovIndxScan;
@@ -78,4 +80,3 @@ const nDocs = 20;
assert.eq(nDocs, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/sorted_merge.js b/jstests/cqf/optimizer/sorted_merge.js
similarity index 95%
rename from jstests/cqf/sorted_merge.js
rename to jstests/cqf/optimizer/sorted_merge.js
index 6c001e99b7807..5213f3717844f 100644
--- a/jstests/cqf/sorted_merge.js
+++ b/jstests/cqf/optimizer/sorted_merge.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ checkCascadesOptimizerEnabled,
+ removeUUIDsFromExplain,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_sorted_merge;
@@ -75,4 +77,3 @@ IndexScan [{'': rid_1}, scanDefName: cqf_sorted_merge_, indexDefName: a_1,
`;
assert.eq(removeUUIDsFromExplain(db, getExplain()), multikeyExplain);
testCorrectness();
-}());
\ No newline at end of file
diff --git a/jstests/cqf/type_bracket.js b/jstests/cqf/optimizer/type_bracket.js
similarity index 94%
rename from jstests/cqf/type_bracket.js
rename to jstests/cqf/optimizer/type_bracket.js
index 13abbc2a856c4..3fb986f80dbe7 100644
--- a/jstests/cqf/type_bracket.js
+++ b/jstests/cqf/optimizer/type_bracket.js
@@ -1,7 +1,4 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js";
const t = db.cqf_type_bracket;
t.drop();
@@ -55,4 +52,3 @@ assert.commandWorked(t.createIndex({a: 1}));
assert.eq(4, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType");
}
-}());
\ No newline at end of file
diff --git a/jstests/cqf/type_predicate.js b/jstests/cqf/optimizer/type_predicate.js
similarity index 82%
rename from jstests/cqf/type_predicate.js
rename to jstests/cqf/optimizer/type_predicate.js
index eb8de44b3f601..af80f58db75fa 100644
--- a/jstests/cqf/type_predicate.js
+++ b/jstests/cqf/optimizer/type_predicate.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_type_predicate;
@@ -23,4 +21,3 @@ for (let i = 0; i < 10; i++) {
const res = t.explain("executionStats").aggregate([{$match: {a: {$type: "double"}}}]);
assert.eq(10, res.executionStats.nReturned);
}
-}());
\ No newline at end of file
diff --git a/jstests/cqf/unionWith.js b/jstests/cqf/optimizer/unionWith.js
similarity index 93%
rename from jstests/cqf/unionWith.js
rename to jstests/cqf/optimizer/unionWith.js
index 63dedc9d75018..70aaf970899ef 100644
--- a/jstests/cqf/unionWith.js
+++ b/jstests/cqf/optimizer/unionWith.js
@@ -1,10 +1,8 @@
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
load("jstests/aggregation/extras/utils.js");
@@ -51,4 +49,3 @@ assert.eq([{_id: 0, a: 1}, {a: 2}], res);
res = collA.aggregate([{$unionWith: "collB"}, {$project: {_id: 0, a: 1}}]).toArray();
assert.eq(2, res.length);
assert.eq([{a: 1}, {a: 2}], res);
-}());
diff --git a/jstests/cqf/validate_internal_plan_with_rid_output.js b/jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js
similarity index 87%
rename from jstests/cqf/validate_internal_plan_with_rid_output.js
rename to jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js
index 37b1fe4d4875c..e94b278631692 100644
--- a/jstests/cqf/validate_internal_plan_with_rid_output.js
+++ b/jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js
@@ -1,13 +1,11 @@
-(function() {
-"use strict";
-
// Validate that we can internally generate a special query which along with a document returns its
// RecordID.
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
+
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_findone_rid;
@@ -30,4 +28,3 @@ try {
} finally {
db.cqf_findone_rid_view.drop();
}
-}());
diff --git a/jstests/cqf/value_elemMatch.js b/jstests/cqf/optimizer/value_elemMatch.js
similarity index 93%
rename from jstests/cqf/value_elemMatch.js
rename to jstests/cqf/optimizer/value_elemMatch.js
index 0afa14bb2d1e5..d57c3724bfa45 100644
--- a/jstests/cqf/value_elemMatch.js
+++ b/jstests/cqf/optimizer/value_elemMatch.js
@@ -1,7 +1,4 @@
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For assertValueOnPlanPath.
+import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js";
const t = db.cqf_value_elemMatch;
t.drop();
@@ -42,4 +39,3 @@ assert.commandWorked(t.createIndex({a: 1}));
assert.eq(0, res.executionStats.nReturned);
assertValueOnPlanPath("CoScan", res, "child.child.child.nodeType");
}
-}());
diff --git a/jstests/cqf/value_elemmatch_exists.js b/jstests/cqf/optimizer/value_elemmatch_exists.js
similarity index 87%
rename from jstests/cqf/value_elemmatch_exists.js
rename to jstests/cqf/optimizer/value_elemmatch_exists.js
index ed0a3e44ef78a..375ad0b6e3818 100644
--- a/jstests/cqf/value_elemmatch_exists.js
+++ b/jstests/cqf/optimizer/value_elemmatch_exists.js
@@ -1,13 +1,11 @@
/**
* Tests scenario related to SERVER-74954.
*/
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_elemmatch_exists;
@@ -33,4 +31,3 @@ const res = t.find({
assert.eq(2, res.length);
assert.eq(1, res[0].a);
assert.eq(4, res[1].a);
-}());
diff --git a/jstests/cqf_parallel/basic_exchange.js b/jstests/cqf_parallel/optimizer/basic_exchange.js
similarity index 81%
rename from jstests/cqf_parallel/basic_exchange.js
rename to jstests/cqf_parallel/optimizer/basic_exchange.js
index 446357a966e1d..cf409bfe3dd1e 100644
--- a/jstests/cqf_parallel/basic_exchange.js
+++ b/jstests/cqf_parallel/optimizer/basic_exchange.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_exchange;
@@ -19,4 +20,3 @@ assert.commandWorked(t.insert({a: {b: 5}}));
const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]);
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("Exchange", res, "child.nodeType");
-}());
diff --git a/jstests/cqf_parallel/groupby.js b/jstests/cqf_parallel/optimizer/groupby.js
similarity index 90%
rename from jstests/cqf_parallel/groupby.js
rename to jstests/cqf_parallel/optimizer/groupby.js
index 9b23fb1546c15..a6ddaae7e21fc 100644
--- a/jstests/cqf_parallel/groupby.js
+++ b/jstests/cqf_parallel/optimizer/groupby.js
@@ -1,10 +1,12 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled,
+ runWithParams
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const t = db.cqf_exchange;
@@ -36,4 +38,3 @@ assertValueOnPlanPath(
"UnknownPartitioning",
res,
"child.child.child.child.child.properties.physicalProperties.distribution.type");
-}());
diff --git a/jstests/cqf_parallel/index.js b/jstests/cqf_parallel/optimizer/index.js
similarity index 81%
rename from jstests/cqf_parallel/index.js
rename to jstests/cqf_parallel/optimizer/index.js
index d56727d5b77a1..c4bd0246a83b8 100644
--- a/jstests/cqf_parallel/index.js
+++ b/jstests/cqf_parallel/optimizer/index.js
@@ -1,10 +1,11 @@
-(function() {
-"use strict";
+import {
+ assertValueOnPlanPath,
+ checkCascadesOptimizerEnabled
+} from "jstests/libs/optimizer_utils.js";
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
- return;
+ quit();
}
const coll = db.cqf_parallel_index;
@@ -21,4 +22,3 @@ assert.commandWorked(coll.createIndex({a: 1}));
let res = coll.explain("executionStats").aggregate([{$match: {a: {$lt: 10}}}]);
assert.eq(10, res.executionStats.nReturned);
assertValueOnPlanPath("IndexScan", res, "child.child.leftChild.child.nodeType");
-}());
diff --git a/jstests/decimal/decimal128_test1.js b/jstests/decimal/decimal128_test1.js
index 6cf083341e46c..b38e6ec7c0b95 100644
--- a/jstests/decimal/decimal128_test1.js
+++ b/jstests/decimal/decimal128_test1.js
@@ -160,4 +160,4 @@ testData.forEach(function(testCase) {
assert.eq(output, `NumberDecimal("${testCase.input}")`);
}
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/decimal/decimal128_test2.js b/jstests/decimal/decimal128_test2.js
index ffbc37042259c..853af54876535 100644
--- a/jstests/decimal/decimal128_test2.js
+++ b/jstests/decimal/decimal128_test2.js
@@ -301,4 +301,4 @@ data.forEach(function(testCase) {
assert.eq(output, `NumberDecimal("${testCase.input}")`);
}
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/decimal/decimal128_test3.js b/jstests/decimal/decimal128_test3.js
index b50f3e45bd14f..4ba0c276e149c 100644
--- a/jstests/decimal/decimal128_test3.js
+++ b/jstests/decimal/decimal128_test3.js
@@ -578,4 +578,4 @@ data.forEach(function(testCase) {
assert.eq(output, `NumberDecimal("${testCase.input}")`);
}
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/decimal/decimal128_test4.js b/jstests/decimal/decimal128_test4.js
index 7ec4f14c303d8..8a945db386ba4 100644
--- a/jstests/decimal/decimal128_test4.js
+++ b/jstests/decimal/decimal128_test4.js
@@ -136,4 +136,4 @@ parseErrors.forEach(function(testCase) {
}
assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/decimal/decimal128_test6.js b/jstests/decimal/decimal128_test6.js
index 07a52669e33b4..a66486b69df0d 100644
--- a/jstests/decimal/decimal128_test6.js
+++ b/jstests/decimal/decimal128_test6.js
@@ -46,4 +46,4 @@ parseErrors.forEach(function(testCase) {
}
assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/decimal/decimal128_test7.js b/jstests/decimal/decimal128_test7.js
index d9ff5774ade84..0bf6c558ae590 100644
--- a/jstests/decimal/decimal128_test7.js
+++ b/jstests/decimal/decimal128_test7.js
@@ -415,4 +415,4 @@ parseErrors.forEach(function(testCase) {
}
assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`);
});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/disk/dbNoCreate.js b/jstests/disk/dbNoCreate.js
index 785d7473f26b2..2d48afad92ea0 100644
--- a/jstests/disk/dbNoCreate.js
+++ b/jstests/disk/dbNoCreate.js
@@ -14,4 +14,4 @@ MongoRunner.stopMongod(m);
m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath});
assert.eq(
-1, m.getDBNames().indexOf(baseName), "found " + baseName + " in " + tojson(m.getDBNames()));
-MongoRunner.stopMongod(m);
\ No newline at end of file
+MongoRunner.stopMongod(m);
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 84183eae7dfd1..e6eb3271200a4 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -7,7 +7,7 @@ var storageEngine = "wiredTiger";
var dbFileMatcher = /(collection|index)-.+\.wt$/;
// Set up helper functions.
-assertDocumentCount = function(db, count) {
+let assertDocumentCount = function(db, count) {
assert.eq(count,
db[baseName].count(),
'Expected ' + count + ' documents in ' + db._name + '.' + baseName + '. ' +
@@ -42,7 +42,7 @@ const waitForDatabaseDirectoryRemoval = function(dbName, dbDirPath) {
/**
* Returns the current connection which gets restarted with wiredtiger.
*/
-checkDBFilesInDBDirectory = function(conn, dbToCheck) {
+let checkDBFilesInDBDirectory = function(conn, dbToCheck) {
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true});
@@ -58,9 +58,9 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) {
dir = dbpath + Array(22).join('.229.135.166');
}
- files = listFiles(dir);
+ let files = listFiles(dir);
var fileCount = 0;
- for (f in files) {
+ for (let f in files) {
if (files[f].isDirectory)
continue;
fileCount += 1;
@@ -74,13 +74,13 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) {
/**
* Returns the restarted connection with wiredtiger.
*/
-checkDBDirectoryNonexistent = function(conn, dbToCheck) {
+let checkDBDirectoryNonexistent = function(conn, dbToCheck) {
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true});
var files = listFiles(dbpath);
// Check that there are no files in the toplevel dbpath.
- for (f in files) {
+ for (let f in files) {
if (!files[f].isDirectory) {
assert(!dbFileMatcher.test(files[f].name),
'Database file' + files[f].name +
diff --git a/jstests/disk/libs/wt_file_helper.js b/jstests/disk/libs/wt_file_helper.js
index 6e819f889e776..dcf076d99befd 100644
--- a/jstests/disk/libs/wt_file_helper.js
+++ b/jstests/disk/libs/wt_file_helper.js
@@ -1,9 +1,9 @@
-load("jstests/libs/analyze_plan.js");
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
/**
* Get the URI of the wt collection file given the collection name.
*/
-let getUriForColl = function(coll) {
+export let getUriForColl = function(coll) {
assert(coll.exists()); // Collection must exist
return coll.stats().wiredTiger.uri.split("table:")[1];
};
@@ -11,7 +11,7 @@ let getUriForColl = function(coll) {
/**
* Get the URI of the wt index file given the collection name and the index name.
*/
-let getUriForIndex = function(coll, indexName) {
+export let getUriForIndex = function(coll, indexName) {
assert(coll.exists()); // Collection must exist
const ret = assert.commandWorked(coll.getDB().runCommand({collStats: coll.getName()}));
return ret.indexDetails[indexName].uri.split("table:")[1];
@@ -20,7 +20,7 @@ let getUriForIndex = function(coll, indexName) {
/**
* 'Corrupt' the file by replacing it with an empty file.
*/
-let corruptFile = function(file) {
+export let corruptFile = function(file) {
removeFile(file);
writeFile(file, "");
};
@@ -29,7 +29,7 @@ let corruptFile = function(file) {
* Starts a mongod on the provided data path without clearing data. Accepts 'options' as parameters
* to runMongod.
*/
-let startMongodOnExistingPath = function(dbpath, options) {
+export let startMongodOnExistingPath = function(dbpath, options) {
let args = {dbpath: dbpath, noCleanData: true};
for (let attr in options) {
if (options.hasOwnProperty(attr))
@@ -38,7 +38,7 @@ let startMongodOnExistingPath = function(dbpath, options) {
return MongoRunner.runMongod(args);
};
-let assertQueryUsesIndex = function(coll, query, indexName) {
+export let assertQueryUsesIndex = function(coll, query, indexName) {
let res = coll.find(query).explain();
assert.commandWorked(res);
@@ -50,7 +50,7 @@ let assertQueryUsesIndex = function(coll, query, indexName) {
/**
* Assert that running MongoDB with --repair on the provided dbpath exits cleanly.
*/
-let assertRepairSucceeds = function(dbpath, port, opts) {
+export let assertRepairSucceeds = function(dbpath, port, opts) {
let args = ["mongod", "--repair", "--port", port, "--dbpath", dbpath, "--bind_ip_all"];
for (let a in opts) {
if (opts.hasOwnProperty(a))
@@ -64,7 +64,7 @@ let assertRepairSucceeds = function(dbpath, port, opts) {
assert.eq(0, runMongoProgram.apply(this, args));
};
-let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) {
+export let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) {
const param = "failpoint." + failpoint + "={'mode': 'alwaysOn'}";
jsTestLog("The node should fail to complete repair with --setParameter " + param);
@@ -77,7 +77,7 @@ let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) {
/**
* Asserts that running MongoDB with --repair on the provided dbpath fails.
*/
-let assertRepairFails = function(dbpath, port) {
+export let assertRepairFails = function(dbpath, port) {
jsTestLog("The node should complete repairing the node but fails.");
assert.neq(0, runMongoProgram("mongod", "--repair", "--port", port, "--dbpath", dbpath));
@@ -87,7 +87,7 @@ let assertRepairFails = function(dbpath, port) {
* Assert that starting MongoDB with --replSet on an existing data path exits with a specific
* error.
*/
-let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) {
+export let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) {
jsTestLog("The repaired node should fail to start up with the --replSet option");
clearRawMongoProgramOutput();
@@ -103,7 +103,7 @@ let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) {
* Assert that starting MongoDB as a standalone on an existing data path exits with a specific
* error because the previous repair failed.
*/
-let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) {
+export let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) {
jsTestLog("The node should fail to start up because a previous repair did not complete");
clearRawMongoProgramOutput();
@@ -119,7 +119,7 @@ let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) {
* Assert that starting MongoDB as a standalone on an existing data path succeeds. Uses a provided
* testFunc to run any caller-provided checks on the started node.
*/
-let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFunc) {
+export let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFunc) {
jsTestLog("The repaired node should start up and serve reads as a standalone");
let node = MongoRunner.runMongod({dbpath: dbpath, port: port, noCleanData: true});
assert(node);
@@ -133,7 +133,8 @@ let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFu
*
* Returns the started node.
*/
-let assertStartInReplSet = function(replSet, originalNode, cleanData, expectResync, testFunc) {
+export let assertStartInReplSet = function(
+ replSet, originalNode, cleanData, expectResync, testFunc) {
jsTestLog("The node should rejoin the replica set. Clean data: " + cleanData +
". Expect resync: " + expectResync);
// Skip clearing initial sync progress after a successful initial sync attempt so that we
@@ -166,7 +167,7 @@ let assertStartInReplSet = function(replSet, originalNode, cleanData, expectResy
/**
* Assert certain error messages are thrown on startup when files are missing or corrupt.
*/
-let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function(
+export let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function(
dbpath, dbName, collName, deleteOrCorruptFunc, errmsgRegExp) {
// Start a MongoDB instance, create the collection file.
const mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true});
@@ -187,11 +188,11 @@ let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function(
/**
* Assert certain error messages are thrown on a specific request when files are missing or corrupt.
*/
-let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function(
+export let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function(
dbpath, dbName, collName, deleteOrCorruptFunc, requestFunc, errmsgRegExp) {
// Start a MongoDB instance, create the collection file.
- mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true});
- testColl = mongod.getDB(dbName)[collName];
+ let mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true});
+ let testColl = mongod.getDB(dbName)[collName];
const doc = {a: 1};
assert.commandWorked(testColl.insert(doc));
@@ -220,7 +221,7 @@ let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function(
/**
* Runs the WiredTiger tool with the provided arguments.
*/
-let runWiredTigerTool = function(...args) {
+export let runWiredTigerTool = function(...args) {
const cmd = ['wt'].concat(args);
// TODO (SERVER-67632): Check the return code on Windows variants again.
if (_isWindows()) {
@@ -234,7 +235,7 @@ let runWiredTigerTool = function(...args) {
* Stops the given mongod, runs the truncate command on the given uri using the WiredTiger tool, and
* starts mongod again on the same path.
*/
-let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) {
+export let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) {
MongoRunner.stopMongod(conn, null, {skipValidation: true});
runWiredTigerTool("-h", conn.dbpath, "truncate", uri);
return startMongodOnExistingPath(conn.dbpath, mongodOptions);
@@ -243,7 +244,7 @@ let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) {
/**
* Stops the given mongod and runs the alter command to modify the index table's metadata.
*/
-let alterIndexFormatVersion = function(uri, conn, formatVersion) {
+export let alterIndexFormatVersion = function(uri, conn, formatVersion) {
MongoRunner.stopMongod(conn, null, {skipValidation: true});
runWiredTigerTool(
"-h",
@@ -257,8 +258,9 @@ let alterIndexFormatVersion = function(uri, conn, formatVersion) {
* Stops the given mongod, dumps the table with the uri, modifies the content, and loads it back to
* the table.
*/
-let count = 0;
-let rewriteTable = function(uri, conn, modifyData) {
+export let count = 0;
+
+export let rewriteTable = function(uri, conn, modifyData) {
MongoRunner.stopMongod(conn, null, {skipValidation: true});
const separator = _isWindows() ? '\\' : '/';
const tempDumpFile = conn.dbpath + separator + "temp_dump";
@@ -281,12 +283,12 @@ let rewriteTable = function(uri, conn, modifyData) {
// In WiredTiger table dumps, the first seven lines are the header and key that we don't want to
// modify. We will skip them and start from the line containing the first value.
-const wtHeaderLines = 7;
+export const wtHeaderLines = 7;
/**
* Inserts the documents with duplicate field names into the MongoDB server.
*/
-let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) {
+export let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) {
for (let i = 0; i < numDocs; ++i) {
coll.insert({a: "aaaaaaa", b: "bbbbbbb"});
}
@@ -304,7 +306,7 @@ let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) {
rewriteTable(uri, conn, makeDuplicateFieldNames);
};
-let insertDocSymbolField = function(coll, uri, conn, numDocs) {
+export let insertDocSymbolField = function(coll, uri, conn, numDocs) {
for (let i = 0; i < numDocs; ++i) {
coll.insert({a: "aaaaaaa"});
}
@@ -324,7 +326,7 @@ let insertDocSymbolField = function(coll, uri, conn, numDocs) {
/**
* Inserts array document with non-sequential indexes into the MongoDB server.
*/
-let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) {
+export let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) {
for (let i = 0; i < numDocs; ++i) {
coll.insert({arr: [1, 2, [1, [1, 2], 2], 3]});
}
@@ -343,7 +345,7 @@ let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) {
/**
* Inserts documents with invalid regex options into the MongoDB server.
*/
-let insertInvalidRegex = function(coll, mongod, nDocuments) {
+export let insertInvalidRegex = function(coll, mongod, nDocuments) {
const regex = "a*.conn";
const options = 'gimsuy';
@@ -377,7 +379,7 @@ let insertInvalidRegex = function(coll, mongod, nDocuments) {
/**
* Inserts document with invalid UTF-8 string into the MongoDB server.
*/
-let insertInvalidUTF8 = function(coll, uri, conn, numDocs) {
+export let insertInvalidUTF8 = function(coll, uri, conn, numDocs) {
for (let i = 0; i < numDocs; ++i) {
coll.insert({validString: "\x70"});
}
@@ -392,4 +394,4 @@ let insertInvalidUTF8 = function(coll, uri, conn, numDocs) {
}
};
rewriteTable(uri, conn, makeInvalidUTF8);
-};
\ No newline at end of file
+};
diff --git a/jstests/disk/repair_clustered_collection.js b/jstests/disk/repair_clustered_collection.js
index 001ac2dbeb241..8a227033c5dea 100644
--- a/jstests/disk/repair_clustered_collection.js
+++ b/jstests/disk/repair_clustered_collection.js
@@ -4,9 +4,12 @@
*
* @tags: [requires_wiredtiger]
*/
-(function() {
+import {
+ assertRepairSucceeds,
+ getUriForColl,
+ startMongodOnExistingPath
+} from "jstests/disk/libs/wt_file_helper.js";
-load('jstests/disk/libs/wt_file_helper.js');
load("jstests/libs/collection_drop_recreate.js");
const dbName = jsTestName();
@@ -55,7 +58,7 @@ const runRepairTest = function runRepairTestOnMongoDInstance(
// Ensure the orphaned collection is valid and the document is preserved.
const orphanedImportantCollName = "orphan." + testCollUri.replace(/-/g, "_");
const localDb = mongod.getDB("local");
- orphanedCollection = localDb[orphanedImportantCollName];
+ let orphanedCollection = localDb[orphanedImportantCollName];
assert(orphanedCollection.exists());
assert.eq(orphanedCollection.count(expectedOrphanDoc),
1,
@@ -83,5 +86,4 @@ docToInsert = {
"timestamp": ISODate("2021-05-18T00:00:00.000Z"),
"temp": 12
};
-runRepairTest(clusteredCollOptions, docToInsert, isTimeseries);
-})();
+runRepairTest(clusteredCollOptions, docToInsert, isTimeseries);
\ No newline at end of file
diff --git a/jstests/disk/repair_corrupt_document.js b/jstests/disk/repair_corrupt_document.js
index 428bbf247566d..ffc5198ced560 100644
--- a/jstests/disk/repair_corrupt_document.js
+++ b/jstests/disk/repair_corrupt_document.js
@@ -2,9 +2,12 @@
* Tests that --repair deletes corrupt BSON documents.
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertQueryUsesIndex,
+ assertRepairSucceeds,
+ getUriForIndex,
+ startMongodOnExistingPath,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "repair_corrupt_document";
const collName = "test";
@@ -83,5 +86,4 @@ let corruptDocumentOnInsert = function(db, coll) {
MongoRunner.stopMongod(mongod);
jsTestLog("Exiting runValidateWithRepairMode.");
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
index 3560ce5331124..1b7d95ebedc17 100644
--- a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
+++ b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js
@@ -4,9 +4,11 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertRepairSucceeds,
+ assertStartAndStopStandaloneOnExistingDbpath,
+ getUriForColl,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "repair_does_not_invalidate_config_on_standalone";
const dbName = baseName;
@@ -40,5 +42,4 @@ assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
assert.eq(nodeDB[collName].find().itcount(), 0);
assert(!nodeDB.getSiblingDB("local")["system.replset"].exists());
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/disk/repair_duplicate_keys.js b/jstests/disk/repair_duplicate_keys.js
index 4b639b9b7ba2d..9e8bd5da2bc70 100644
--- a/jstests/disk/repair_duplicate_keys.js
+++ b/jstests/disk/repair_duplicate_keys.js
@@ -5,9 +5,12 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
+import {
+ assertQueryUsesIndex,
+ assertRepairSucceeds,
+ startMongodOnExistingPath
+} from "jstests/disk/libs/wt_file_helper.js";
-load('jstests/disk/libs/wt_file_helper.js');
load("jstests/libs/uuid_util.js");
const baseName = "repair_duplicate_keys";
@@ -255,5 +258,4 @@ runRepairAndVerifyCollectionDocs();
MongoRunner.stopMongod(mongod);
jsTestLog("Exiting checkLostAndFoundCollForDoubleDup.");
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/disk/repair_failure_is_recoverable.js b/jstests/disk/repair_failure_is_recoverable.js
index 2ede4bfe36db0..4b5a693f5e3d8 100644
--- a/jstests/disk/repair_failure_is_recoverable.js
+++ b/jstests/disk/repair_failure_is_recoverable.js
@@ -5,9 +5,12 @@
* This is not storage-engine specific.
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertErrorOnStartupAfterIncompleteRepair,
+ assertRepairFailsWithFailpoint,
+ assertRepairSucceeds,
+ assertStartAndStopStandaloneOnExistingDbpath,
+} from "jstests/disk/libs/wt_file_helper.js";
const exitBeforeRepairParameter = "exitBeforeDataRepair";
const exitBeforeRepairInvalidatesConfigParameter = "exitBeforeRepairInvalidatesConfig";
@@ -58,5 +61,4 @@ assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) {
let nodeDB = node.getDB(dbName);
assert(nodeDB[collName].exists());
assert.eq(nodeDB[collName].find().itcount(), 1);
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/disk/repair_index_format_version.js b/jstests/disk/repair_index_format_version.js
index 46922a98b0904..c18a29f962494 100644
--- a/jstests/disk/repair_index_format_version.js
+++ b/jstests/disk/repair_index_format_version.js
@@ -2,9 +2,11 @@
* Tests that mismatch of index type and index format version will be resolved during startup.
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ alterIndexFormatVersion,
+ getUriForIndex,
+ startMongodOnExistingPath
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "repair_index_format_version";
const collNamePrefix = "test_";
@@ -43,5 +45,4 @@ alterIndexFormatVersion(uri, mongod, 14);
mongod = startMongodOnExistingPath(dbpath);
checkLog.containsJson(mongod, 6818600);
-MongoRunner.stopMongod(mongod, null, {skipValidation: true});
-})();
+MongoRunner.stopMongod(mongod, null, {skipValidation: true});
\ No newline at end of file
diff --git a/jstests/disk/repair_invalidates_replica_set_config.js b/jstests/disk/repair_invalidates_replica_set_config.js
index 8b3745f5d778a..256317a3e96d8 100644
--- a/jstests/disk/repair_invalidates_replica_set_config.js
+++ b/jstests/disk/repair_invalidates_replica_set_config.js
@@ -5,9 +5,13 @@
* @tags: [requires_wiredtiger, requires_replication]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertErrorOnStartupWhenStartingAsReplSet,
+ assertRepairSucceeds,
+ assertStartAndStopStandaloneOnExistingDbpath,
+ assertStartInReplSet,
+ getUriForColl,
+} from "jstests/disk/libs/wt_file_helper.js";
// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
TestData.skipEnforceFastCountOnValidate = true;
@@ -123,5 +127,4 @@ secondary = assertStartInReplSet(
assert.eq(nodeDB[collName].find().itcount(), 1);
});
-replSet.stopSet();
-})();
+replSet.stopSet();
\ No newline at end of file
diff --git a/jstests/disk/repair_unfinished_indexes.js b/jstests/disk/repair_unfinished_indexes.js
index 0f2a84fc68ab2..ae0e79b1aeff5 100644
--- a/jstests/disk/repair_unfinished_indexes.js
+++ b/jstests/disk/repair_unfinished_indexes.js
@@ -5,9 +5,15 @@
* @tags: [requires_wiredtiger, requires_replication]
*/
-(function() {
+import {
+ assertErrorOnStartupWhenStartingAsReplSet,
+ assertRepairSucceeds,
+ assertStartInReplSet,
+ corruptFile,
+ getUriForColl,
+ startMongodOnExistingPath,
+} from "jstests/disk/libs/wt_file_helper.js";
-load('jstests/disk/libs/wt_file_helper.js');
load('jstests/noPassthrough/libs/index_build.js');
const dbName = "repair_unfinished_indexes";
@@ -91,5 +97,4 @@ assertErrorOnStartupWhenStartingAsReplSet(
newSecondary.getDB(dbName).getCollection(collName), 2, ["_id_", "a_1"]);
})();
-replSet.stopSet();
-})();
+replSet.stopSet();
\ No newline at end of file
diff --git a/jstests/disk/validate_bson_inconsistency.js b/jstests/disk/validate_bson_inconsistency.js
index 7f23e3ff8c5fe..5740e3a1e5db4 100644
--- a/jstests/disk/validate_bson_inconsistency.js
+++ b/jstests/disk/validate_bson_inconsistency.js
@@ -4,9 +4,15 @@
* @tags: [requires_fcv_62]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ getUriForColl,
+ insertDocDuplicateFieldName,
+ insertDocSymbolField,
+ insertInvalidRegex,
+ insertInvalidUTF8,
+ insertNonSequentialArrayIndexes,
+ startMongodOnExistingPath,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "validate_bson_inconsistency";
const collNamePrefix = "test_";
@@ -236,7 +242,7 @@ resetDbpath(dbpath);
db = mongod.getDB(baseName);
testColl = db[collName];
- res = assert.commandWorked(testColl.validate());
+ let res = assert.commandWorked(testColl.validate());
assert(res.valid, tojson(res));
assert.eq(res.nNonCompliantDocuments, 10);
assert.eq(res.warnings.length, 1);
@@ -266,7 +272,7 @@ resetDbpath(dbpath);
db = mongod.getDB(baseName);
testColl = db[collName];
- res = assert.commandWorked(testColl.validate());
+ let res = assert.commandWorked(testColl.validate());
assert(res.valid, tojson(res));
assert.eq(res.nNonCompliantDocuments, 0);
assert.eq(res.warnings.length, 0);
@@ -319,5 +325,4 @@ resetDbpath(dbpath);
assert.eq(res.warnings.length, 1);
MongoRunner.stopMongod(mongod, null, {skipValidation: true});
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/disk/wt_corrupt_file_errors.js b/jstests/disk/wt_corrupt_file_errors.js
index bd799a992ea99..46cb462125fd7 100644
--- a/jstests/disk/wt_corrupt_file_errors.js
+++ b/jstests/disk/wt_corrupt_file_errors.js
@@ -4,9 +4,13 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertErrorOnRequestWhenFilesAreCorruptOrMissing,
+ assertErrorOnStartupWhenFilesAreCorruptOrMissing,
+ corruptFile,
+ getUriForColl,
+ getUriForIndex,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_corrupt_file_errors";
const collName = "test";
@@ -80,5 +84,4 @@ assertErrorOnRequestWhenFilesAreCorruptOrMissing(
testColl.insert({a: 1});
});
},
- new RegExp("Fatal assertion.*50882"));
-})();
+ new RegExp("Fatal assertion.*50882"));
\ No newline at end of file
diff --git a/jstests/disk/wt_missing_file_errors.js b/jstests/disk/wt_missing_file_errors.js
index 85310fa82e901..e71220d7352ec 100644
--- a/jstests/disk/wt_missing_file_errors.js
+++ b/jstests/disk/wt_missing_file_errors.js
@@ -4,9 +4,12 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertErrorOnRequestWhenFilesAreCorruptOrMissing,
+ assertErrorOnStartupWhenFilesAreCorruptOrMissing,
+ getUriForColl,
+ getUriForIndex,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_missing_file_errors";
const collName = "test";
@@ -80,5 +83,4 @@ assertErrorOnRequestWhenFilesAreCorruptOrMissing(
testColl.insert({a: 1});
});
},
- new RegExp("Fatal assertion.*50883"));
-})();
+ new RegExp("Fatal assertion.*50883"));
\ No newline at end of file
diff --git a/jstests/disk/wt_repair_corrupt_files.js b/jstests/disk/wt_repair_corrupt_files.js
index 2c0dcd7c67543..4968fc87f3f5d 100644
--- a/jstests/disk/wt_repair_corrupt_files.js
+++ b/jstests/disk/wt_repair_corrupt_files.js
@@ -5,9 +5,16 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertErrorOnStartupAfterIncompleteRepair,
+ assertQueryUsesIndex,
+ assertRepairFails,
+ assertRepairSucceeds,
+ corruptFile,
+ getUriForColl,
+ getUriForIndex,
+ startMongodOnExistingPath,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_repair_corrupt_files";
const collName = "test";
@@ -169,5 +176,4 @@ let runTest = function(mongodOptions) {
runTest({});
runTest({directoryperdb: ""});
-runTest({wiredTigerDirectoryForIndexes: ""});
-})();
+runTest({wiredTigerDirectoryForIndexes: ""});
\ No newline at end of file
diff --git a/jstests/disk/wt_repair_corrupt_metadata.js b/jstests/disk/wt_repair_corrupt_metadata.js
index 6e529bf5d90d9..0ef7eb8df0e49 100644
--- a/jstests/disk/wt_repair_corrupt_metadata.js
+++ b/jstests/disk/wt_repair_corrupt_metadata.js
@@ -5,9 +5,7 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {assertRepairSucceeds, startMongodOnExistingPath} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_repair_corrupt_metadata";
const collName = "test";
@@ -99,5 +97,4 @@ let runTest = function(mongodOptions) {
MongoRunner.stopMongod(mongod);
};
-runTest({});
-})();
+runTest({});
\ No newline at end of file
diff --git a/jstests/disk/wt_repair_inconsistent_index.js b/jstests/disk/wt_repair_inconsistent_index.js
index f11e03a5e0502..f409bd13decab 100644
--- a/jstests/disk/wt_repair_inconsistent_index.js
+++ b/jstests/disk/wt_repair_inconsistent_index.js
@@ -4,9 +4,14 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertQueryUsesIndex,
+ assertRepairSucceeds,
+ getUriForColl,
+ getUriForIndex,
+ startMongodOnExistingPath,
+ truncateUriAndRestartMongod,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_repair_inconsistent_index";
const collName = "test";
@@ -114,5 +119,4 @@ let runTest = function(mongodOptions) {
runTest({});
runTest({directoryperdb: ""});
-runTest({wiredTigerDirectoryForIndexes: ""});
-})();
+runTest({wiredTigerDirectoryForIndexes: ""});
\ No newline at end of file
diff --git a/jstests/disk/wt_repair_missing_files.js b/jstests/disk/wt_repair_missing_files.js
index 7f3b8ce42d061..a97da2bfbad02 100644
--- a/jstests/disk/wt_repair_missing_files.js
+++ b/jstests/disk/wt_repair_missing_files.js
@@ -5,9 +5,13 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {
+ assertQueryUsesIndex,
+ assertRepairSucceeds,
+ getUriForColl,
+ getUriForIndex,
+ startMongodOnExistingPath,
+} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_repair_missing_files";
const collName = "test";
@@ -167,5 +171,4 @@ testColl = mongod.getDB(baseName)[collName];
assert.eq(testColl.find(doc).itcount(), 1);
assert.eq(testColl.count(), 1);
-MongoRunner.stopMongod(mongod);
-})();
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/disk/wt_repair_orphaned_idents.js b/jstests/disk/wt_repair_orphaned_idents.js
index 83d3bfee42473..606e2269f5553 100644
--- a/jstests/disk/wt_repair_orphaned_idents.js
+++ b/jstests/disk/wt_repair_orphaned_idents.js
@@ -4,9 +4,7 @@
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {getUriForColl} from "jstests/disk/libs/wt_file_helper.js";
const baseName = "wt_repair_orphaned_idents";
const dbpath = MongoRunner.dataPath + baseName + "/";
@@ -82,5 +80,4 @@ for (let entry of res.cursor.firstBatch) {
assert(testDb[collName].drop());
}
-MongoRunner.stopMongod(mongod);
-})();
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/disk/wt_startup_with_missing_user_collection.js b/jstests/disk/wt_startup_with_missing_user_collection.js
index 23752fa20ba0e..f48cbc9f2f500 100644
--- a/jstests/disk/wt_startup_with_missing_user_collection.js
+++ b/jstests/disk/wt_startup_with_missing_user_collection.js
@@ -4,9 +4,12 @@
*
* @tags: [requires_wiredtiger]
*/
-(function() {
+import {
+ getUriForColl,
+ getUriForIndex,
+ startMongodOnExistingPath
+} from "jstests/disk/libs/wt_file_helper.js";
-load('jstests/disk/libs/wt_file_helper.js');
load('jstests/noPassthrough/libs/index_build.js');
// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
@@ -74,5 +77,4 @@ assert.neq(null, mongod, "Failed to start");
testDB = mongod.getDB(dbName);
assert(testDB.getCollection("a").drop());
-MongoRunner.stopMongod(mongod);
-}());
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/disk/wt_table_checks.js b/jstests/disk/wt_table_checks.js
index cb8003c92811b..ce30ea921aae6 100644
--- a/jstests/disk/wt_table_checks.js
+++ b/jstests/disk/wt_table_checks.js
@@ -4,9 +4,7 @@
*
* @tags: [requires_wiredtiger]
*/
-(function() {
-
-load('jstests/disk/libs/wt_file_helper.js');
+import {startMongodOnExistingPath} from "jstests/disk/libs/wt_file_helper.js";
function checkTableLogSettings(conn, enabled) {
conn.getDBNames().forEach(function(d) {
@@ -40,7 +38,7 @@ function checkTableLogSettings(conn, enabled) {
function checkTableChecksFileRemoved(dbpath) {
let files = listFiles(dbpath);
- for (file of files) {
+ for (let file of files) {
assert.eq(false, file.name.includes("_wt_table_checks"));
}
}
@@ -123,5 +121,4 @@ checkLog.containsJson(conn, 22432);
// Skipping table logging checks.
assert(checkLog.checkContainsWithCountJson(conn, 5548302, undefined, 0));
checkTableLogSettings(conn, /*enabled=*/ true);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/disk/wt_validate_table_logging.js b/jstests/disk/wt_validate_table_logging.js
index 939280feeb8ea..7659075266e8e 100644
--- a/jstests/disk/wt_validate_table_logging.js
+++ b/jstests/disk/wt_validate_table_logging.js
@@ -5,10 +5,7 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
let conn = MongoRunner.runMongod();
@@ -92,5 +89,4 @@ if (csiEnabled) {
{index: '$**_columnstore', uri: indexUri(conn, '$**_columnstore'), expected: true});
}
-MongoRunner.stopMongod(conn, null, {skipValidation: true});
-}());
+MongoRunner.stopMongod(conn, null, {skipValidation: true});
\ No newline at end of file
diff --git a/jstests/fle2/convert_encrypted_to_capped.js b/jstests/fle2/convert_encrypted_to_capped.js
index 17be6f78c53cb..e977f2e5bf545 100644
--- a/jstests/fle2/convert_encrypted_to_capped.js
+++ b/jstests/fle2/convert_encrypted_to_capped.js
@@ -8,13 +8,10 @@
* requires_fcv_70
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
+import {isFLE2ReplicationEnabled} from "jstests/fle2/libs/encrypted_client_util.js";
if (!isFLE2ReplicationEnabled()) {
- return;
+ quit();
}
const dbTest = db.getSiblingDB('convert_encrypted_to_capped_db');
@@ -42,4 +39,3 @@ assert.commandFailedWithCode(
dbTest.runCommand({cloneCollectionAsCapped: "basic", toCollection: "capped", size: 100000}),
6367302,
"Clone encrypted collection as capped passed");
-}());
diff --git a/jstests/fle2/create_encrypted_collection.js b/jstests/fle2/create_encrypted_collection.js
index b186f8cb7591a..6308abbb6db9e 100644
--- a/jstests/fle2/create_encrypted_collection.js
+++ b/jstests/fle2/create_encrypted_collection.js
@@ -6,11 +6,6 @@
* requires_fcv_70
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
-
let dbTest = db.getSiblingDB('create_encrypted_collection_db');
dbTest.dropDatabase();
@@ -85,5 +80,4 @@ assert.commandWorked(dbTest.createCollection("basic_int32_cf", {
"queries": {"queryType": "equality", contention: NumberInt(123)}
}]
}
-}));
-}());
+}));
\ No newline at end of file
diff --git a/jstests/fle2/create_encrypted_indexes.js b/jstests/fle2/create_encrypted_indexes.js
index ce16988325e01..b1b0dd52ee274 100644
--- a/jstests/fle2/create_encrypted_indexes.js
+++ b/jstests/fle2/create_encrypted_indexes.js
@@ -6,11 +6,6 @@
* requires_fcv_70
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
-
let dbTest = db.getSiblingDB('create_encrypted_indexes_db');
dbTest.basic.drop();
@@ -73,5 +68,4 @@ assert.commandFailedWithCode(res, 6346502, "Create compound index on encrypted f
assert.commandWorked(dbTest.basic.createIndex({"paymentMethods.creditCards.notNumber": 1}));
// A wildcard index on the entire document is allowed.
-assert.commandWorked(dbTest.basic.createIndex({"$**": 1}));
-}());
+assert.commandWorked(dbTest.basic.createIndex({"$**": 1}));
\ No newline at end of file
diff --git a/jstests/fle2/libs/encrypted_client_util.js b/jstests/fle2/libs/encrypted_client_util.js
index a74099a2704a7..7bf8d0b81c2fc 100644
--- a/jstests/fle2/libs/encrypted_client_util.js
+++ b/jstests/fle2/libs/encrypted_client_util.js
@@ -1,13 +1,13 @@
load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isMongos.
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
/**
* Create a FLE client that has an unencrypted and encrypted client to the same database
*/
-var kSafeContentField = "__safeContent__";
+export var kSafeContentField = "__safeContent__";
-var EncryptedClient = class {
+export var EncryptedClient = class {
/**
* Create a new encrypted FLE connection to the target server with a local KMS
*
@@ -21,6 +21,14 @@ var EncryptedClient = class {
// use
this.useImplicitSharding = !(typeof (ImplicitlyShardAccessCollSettings) === "undefined");
+ if (conn.isAutoEncryptionEnabled()) {
+ this._keyVault = conn.getKeyVault();
+ this._edb = conn.getDB(dbName);
+ this._db = undefined;
+ this._admindb = conn.getDB("admin");
+ return;
+ }
+
const localKMS = {
key: BinData(
0,
@@ -69,6 +77,13 @@ var EncryptedClient = class {
return this._edb;
}
+ /**
+ * Creates a session on the encryptedClient.
+ */
+ startSession() {
+ return this._edb.getMongo().startSession();
+ }
+
/**
* Return an encrypted database
*
@@ -158,21 +173,21 @@ var EncryptedClient = class {
// All our tests use "last" as the key to query on so shard on "last" instead of "_id"
if (this.useImplicitSharding) {
- let resShard = this._db.adminCommand({enableSharding: this._db.getName()});
+ let resShard = this._edb.adminCommand({enableSharding: this._edb.getName()});
// enableSharding may only be called once for a database.
if (resShard.code !== ErrorCodes.AlreadyInitialized) {
assert.commandWorked(
- resShard, "enabling sharding on the '" + this._db.getName() + "' db failed");
+ resShard, "enabling sharding on the '" + this._edb.getName() + "' db failed");
}
let shardCollCmd = {
- shardCollection: this._db.getName() + "." + name,
+ shardCollection: this._edb.getName() + "." + name,
key: {last: "hashed"},
collation: {locale: "simple"}
};
- resShard = this._db.adminCommand(shardCollCmd);
+ resShard = this._edb.adminCommand(shardCollCmd);
jsTestLog("Sharding: " + tojson(shardCollCmd));
}
@@ -207,7 +222,7 @@ var EncryptedClient = class {
if (tenantId) {
Object.extend(listCollCmdObj, {"$tenant": tenantId});
}
- const cis = assert.commandWorked(this._db.runCommand(listCollCmdObj));
+ const cis = assert.commandWorked(this._edb.runCommand(listCollCmdObj));
assert.eq(
cis.cursor.firstBatch.length, 1, `Expected to find one collection named '${name}'`);
@@ -244,7 +259,7 @@ var EncryptedClient = class {
const actualEcoc = countDocuments(sessionDB, ef.ecocCollection, tenantId);
assert.eq(actualEcoc,
- this.ecocCountMatchesEscCount ? expectedEsc : expectedEcoc,
+ expectedEcoc,
`ECOC document count is wrong: Actual ${actualEcoc} vs Expected ${expectedEcoc}`);
}
@@ -258,7 +273,24 @@ var EncryptedClient = class {
*/
assertEncryptedCollectionCounts(name, expectedEdc, expectedEsc, expectedEcoc, tenantId) {
this.assertEncryptedCollectionCountsByObject(
- this._db, name, expectedEdc, expectedEsc, expectedEcoc, tenantId);
+ this._edb, name, expectedEdc, expectedEsc, expectedEcoc, tenantId);
+ }
+
+ /**
+ * Assert the number of non-anchor documents in the ESC associated with the given EDC
+ * collection name matches the expected.
+ *
+ * @param {string} name Name of EDC
+ * @param {number} expectedCount Number of non-anchors expected in ESC
+ */
+ assertESCNonAnchorCount(name, expectedCount) {
+ const escName = this.getStateCollectionNamespaces(name).esc;
+ const actualCount =
+ this._edb.getCollection(escName).countDocuments({"value": {"$exists": false}});
+ assert.eq(
+ actualCount,
+ expectedCount,
+ `ESC non-anchor count is wrong: Actual ${actualCount} vs Expected ${expectedCount}`);
}
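+
+    // Hypothetical usage sketch (the collection name "basic" and the expected count are
+    // illustrative only): after compacting "basic", a test might assert that all ESC
+    // non-anchors for the compacted values were removed, e.g.
+    //   client.assertESCNonAnchorCount("basic", 0);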
/**
@@ -371,7 +403,10 @@ var EncryptedClient = class {
assert.docEq(docs, onDiskDocs);
}
- assertStateCollectionsAfterCompact(collName, ecocExists, ecocTempExists = false) {
+ assertStateCollectionsAfterCompact(collName,
+ ecocExists,
+ ecocTempExists = false,
+ escDeletesExists = false) {
const baseCollInfos = this._edb.getCollectionInfos({"name": collName});
assert.eq(baseCollInfos.length, 1);
const baseCollInfo = baseCollInfos[0];
@@ -380,10 +415,11 @@ var EncryptedClient = class {
const checkMap = {};
// Always expect the ESC collection, optionally expect ECOC.
- // ECOC is not expected in sharded clusters.
checkMap[baseCollInfo.options.encryptedFields.escCollection] = true;
checkMap[baseCollInfo.options.encryptedFields.ecocCollection] = ecocExists;
checkMap[baseCollInfo.options.encryptedFields.ecocCollection + ".compact"] = ecocTempExists;
+ checkMap[baseCollInfo.options.encryptedFields.escCollection + ".deletes"] =
+ escDeletesExists;
const edb = this._edb;
Object.keys(checkMap).forEach(function(coll) {
@@ -394,7 +430,7 @@ var EncryptedClient = class {
}
};
-function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallback) {
+export function runEncryptedTest(db, dbName, collNames, encryptedFields, runTestsCallback) {
const dbTest = db.getSiblingDB(dbName);
dbTest.dropDatabase();
@@ -408,8 +444,14 @@ function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallbac
let client = new EncryptedClient(db.getMongo(), dbName);
- assert.commandWorked(
- client.createEncryptionCollection(collName, {encryptedFields: encryptedFields}));
+ if (typeof collNames === "string") {
+ collNames = [collNames];
+ }
+
+ for (let collName of collNames) {
+ assert.commandWorked(
+ client.createEncryptionCollection(collName, {encryptedFields: encryptedFields}));
+ }
let edb = client.getDB();
runTestsCallback(edb, client);
@@ -418,22 +460,21 @@ function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallbac
/**
* @returns Returns true if talking to a replica set
*/
-function isFLE2ReplicationEnabled() {
+export function isFLE2ReplicationEnabled() {
return typeof (testingReplication) == "undefined" || testingReplication === true;
}
-// TODO SERVER-67760 remove once feature flag is gone
/**
- * @returns Returns true if featureFlagFLE2Range is enabled
+ * @returns Returns true if featureFlagFLE2CleanupCommand is enabled
*/
-function isFLE2RangeEnabled(db) {
- return FeatureFlagUtil.isPresentAndEnabled(db, "FLE2Range");
+export function isFLE2CleanupEnabled(db) {
+ return FeatureFlagUtil.isEnabled(db, "FLE2CleanupCommand");
}
/**
* @returns Returns true if internalQueryFLEAlwaysUseEncryptedCollScanMode is enabled
*/
-function isFLE2AlwaysUseCollScanModeEnabled(db) {
+export function isFLE2AlwaysUseCollScanModeEnabled(db) {
const doc = assert.commandWorked(
db.adminCommand({getParameter: 1, internalQueryFLEAlwaysUseEncryptedCollScanMode: 1}));
return (doc.internalQueryFLEAlwaysUseEncryptedCollScanMode === true);
@@ -445,7 +486,7 @@ function isFLE2AlwaysUseCollScanModeEnabled(db) {
*
* @param {BinData} value bindata value
*/
-function assertIsIndexedEncryptedField(value) {
+export function assertIsIndexedEncryptedField(value) {
assert(value instanceof BinData, "Expected BinData, found: " + value);
assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value);
assert(value.hex().startsWith("0e") || value.hex().startsWith("0f"),
@@ -457,7 +498,7 @@ function assertIsIndexedEncryptedField(value) {
*
* @param {BinData} value bindata value
*/
-function assertIsEqualityIndexedEncryptedField(value) {
+export function assertIsEqualityIndexedEncryptedField(value) {
assert(value instanceof BinData, "Expected BinData, found: " + value);
assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value);
assert(value.hex().startsWith("0e"),
@@ -469,7 +510,7 @@ function assertIsEqualityIndexedEncryptedField(value) {
*
* @param {BinData} value bindata value
*/
-function assertIsRangeIndexedEncryptedField(value) {
+export function assertIsRangeIndexedEncryptedField(value) {
assert(value instanceof BinData, "Expected BinData, found: " + value);
assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value);
assert(value.hex().startsWith("0f"),
@@ -481,7 +522,7 @@ function assertIsRangeIndexedEncryptedField(value) {
*
* @param {BinData} value bindata value
*/
-function assertIsUnindexedEncryptedField(value) {
+export function assertIsUnindexedEncryptedField(value) {
assert(value instanceof BinData, "Expected BinData, found: " + value);
assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value);
assert(value.hex().startsWith("10"),
diff --git a/jstests/fle2/libs/qe_state_collection_stats_tracker.js b/jstests/fle2/libs/qe_state_collection_stats_tracker.js
new file mode 100644
index 0000000000000..004175f6130ea
--- /dev/null
+++ b/jstests/fle2/libs/qe_state_collection_stats_tracker.js
@@ -0,0 +1,239 @@
+/**
+ * Class that tracks the document counts in the QE state collections for every unique
+ * field+value pair that exists in the encrypted data collection.
+ *
+ * NOTE: This tracker is only accurate if the encrypted fields being tracked all have
+ * a contention factor of 0. Also, the type of the encrypted value has to be a string.
+ */
+class QEStateCollectionStatsTracker {
+ constructor() {
+ /* fieldStats is a map of field names to a map of values mapped to objects
+ containing stats counters. For example:
+ {
+ "first" : {
+ "erwin" : { nonAnchors: 2, anchors: 0, nullAnchor: false, ecoc: 2, new: true},
+ ...
+ },
+ ...
+ }
+ */
+ this.fieldStats = {};
+ }
+
+ /**
+ * Updates the stats after inserting a single encrypted document that contains the
+ * specified field (key) and value pair.
+ * Every insert of an encrypted field adds one non-anchor to the ESC and adds one
+ * entry in the ECOC.
+ *
+ * @param {string} key the field name
+ * @param {string} value the field value
+ */
+ updateStatsPostInsert(key, value) {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ this.fieldStats[key] = {};
+ }
+
+ const field = this.fieldStats[key];
+ if (field.hasOwnProperty(value)) {
+ field[value].nonAnchors++;
+ field[value].ecoc++;
+ } else {
+ field[value] = {nonAnchors: 1, anchors: 0, nullAnchor: false, ecoc: 1, new: true};
+ }
+ }
+
+ /**
+ * Updates the stats after compacting the collection where documents
+ * containing the specified encrypted fields exist.
+ * For every encrypted value that has been inserted for each field that has not been
+ * compacted/cleaned-up (i.e. has one or more ECOC entries), we update the stats for this
+ * field+value pair by adding one ESC anchor, and clearing the counts for non-anchors & ecoc.
+ *
+ * This assumes that all non-anchors & ecoc entries for this value have been deleted after
+ * compaction.
+ *
+ * @param {string} keys list of field names that were compacted
+ */
+ updateStatsPostCompactForFields(...keys) {
+ keys.forEach(key => {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ print("Skipping field " + key +
+ " in updateStatsPostCompact because it is not tracked");
+ return;
+ }
+ const field = this.fieldStats[key];
+ Object.entries(field).forEach(([value, stats]) => {
+ if (stats.ecoc > 0) {
+ stats.anchors++;
+ stats.nonAnchors = 0;
+ stats.ecoc = 0;
+ }
+ stats.new = false;
+ });
+ });
+ }
+
+ /**
+ * Updates the stats after cleanup of the encrypted collection where documents
+ * containing the specified encrypted fields exist.
+ * For every field+value pair that has been inserted but not yet compacted/cleaned-up
+ * (i.e. has one or more ECOC entries), we update the stats for this field+value pair
+ * by adding one ESC null anchor (if none exists yet), and clearing the
+ * counts for normal anchors, non-anchors, & ecoc.
+ *
+ * This assumes that all non-anchors and normal anchors for this value have been deleted
+ * from the ESC after cleanup. This also assumes all ECOC entries for this value have
+ * been deleted post-cleanup.
+ *
+ * @param {string} keys list of field names that were cleaned up
+ */
+ updateStatsPostCleanupForFields(...keys) {
+ keys.forEach(key => {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ print("Skipping field " + key +
+ " in updateStatsPostCleanup because it is not tracked");
+ return;
+ }
+ const field = this.fieldStats[key];
+ Object.entries(field).forEach(([value, stats]) => {
+ if (stats.ecoc > 0) {
+ stats.nullAnchor = true;
+ stats.nonAnchors = 0;
+ stats.anchors = 0;
+ stats.ecoc = 0;
+ }
+ stats.new = false;
+ });
+ });
+ }
+
+ /**
+ * Returns an object that contains the aggregated statistics for each
+ * field specified in keys.
+ *
+ * @param {string} keys list of field names whose stats should be aggregated
+ * @returns {Object}
+ */
+ calculateTotalStatsForFields(...keys) {
+ const totals = {
+ esc: 0, // # of ESC entries
+ escNonAnchors: 0, // # of ESC non-anchors
+ escAnchors: 0, // # of ESC anchors
+ escNullAnchors: 0, // # of ESC null anchors
+ escDeletableAnchors: 0, // # of ESC anchors that may be deleted in the next cleanup
+ escFutureNullAnchors: 0, // # of null anchors that may be inserted in the next cleanup
+ ecoc: 0, // # of ECOC entries
+ ecocUnique: 0, // # of ECOC entries that are unique
+ new: 0, // # of new values
+ };
+ keys.forEach(key => {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ print("Skipping field " + key + " in stats aggregation because it is not tracked");
+ return;
+ }
+ const field = this.fieldStats[key];
+ Object.entries(field).forEach(([value, stats]) => {
+ totals.esc += (stats.nonAnchors + stats.anchors + (stats.nullAnchor ? 1 : 0));
+ totals.escNonAnchors += stats.nonAnchors;
+ totals.escAnchors += stats.anchors;
+ totals.escNullAnchors += (stats.nullAnchor ? 1 : 0);
+ totals.escDeletableAnchors += ((stats.ecoc > 0) ? stats.anchors : 0);
+ totals.escFutureNullAnchors += ((stats.ecoc > 0 && stats.nullAnchor == 0) ? 1 : 0);
+ totals.ecoc += stats.ecoc;
+ totals.ecocUnique += ((stats.ecoc > 0) ? 1 : 0);
+ totals.new += (stats.new ? 1 : 0);
+ });
+ });
+
+ return totals;
+ }
+
+ _calculateEstimatedEmuBinaryReads(nAnchors, nNonAnchors, hasNullAnchor, escSize) {
+ let total = 0;
+
+ // anchor binary hops
+ //
+ total += 1; // null anchor read for lambda
+ let rho = 2;
+ if (nAnchors > 0) {
+ rho = Math.pow(2, Math.floor(Math.log2(nAnchors)) + 1);
+ }
+ total += Math.log2(rho); // # reads to find rho
+ total += Math.log2(rho); // # reads in the binary search iterations
+ total += (nAnchors == 0 ? 1 : 0); // extra read if no anchors exist
+
+ // binary hops
+ //
+ total += (nAnchors > 0 || hasNullAnchor) ? 1 : 0; // anchor read for lambda
+ rho = Math.max(2, escSize);
+ total += 1; // estimated # of reads to find final value of rho
+ total += Math.ceil(Math.log2(rho)); // estimated # of binary search iterations
+ total += (nNonAnchors == 0 ? 1 : 0); // extra read if no non-anchors exist
+ return total;
+ }
+
+ /**
+ * Returns a lower-bound on how many ESC reads will be performed if a
+ * cleanup is performed on the current encrypted collection state.
+ * NOTE: call this *before* calling cleanup and before updating the tracker
+ * with updateStatsPostCleanupForFields.
+ *
+ * @param {string} keys list of field names that have been added to the encrypted collection
+ * @returns {int}
+ */
+ calculateEstimatedESCReadCountForCleanup(...keys) {
+ let totals = this.calculateTotalStatsForFields(...keys);
+ let estimate = 0;
+
+ estimate += totals.escNonAnchors; // # of reads into in-mem delete set
+
+ keys.forEach(key => {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ return;
+ }
+ const field = this.fieldStats[key];
+ Object.entries(field).forEach(([value, stats]) => {
+ if (stats.ecoc == 0) {
+ return; // value not compacted
+ }
+ estimate += 1; // null anchor read
+ estimate += this._calculateEstimatedEmuBinaryReads(
+ stats.anchors, stats.nonAnchors, stats.nullAnchor, totals.esc);
+ });
+ });
+ return estimate;
+ }
+
+ /**
+ * Returns a lower-bound on how many ESC reads will be performed if a
+ * compact is performed on the current encrypted collection state.
+ * NOTE: call this *before* calling compact and before updating the tracker
+ * with updateStatsPostCompactForFields.
+ *
+ * @param {string} keys list of field names that have been added to the encrypted collection
+ * @returns {int}
+ */
+ calculateEstimatedESCReadCountForCompact(...keys) {
+ let totals = this.calculateTotalStatsForFields(...keys);
+ let estimate = 0;
+
+ estimate += totals.escNonAnchors; // # of reads into in-mem delete set
+
+ keys.forEach(key => {
+ if (!this.fieldStats.hasOwnProperty(key)) {
+ return;
+ }
+ const field = this.fieldStats[key];
+ Object.entries(field).forEach(([value, stats]) => {
+ if (stats.ecoc == 0) {
+ return; // value not compacted
+ }
+ estimate += (stats.nullAnchor ? 1 : 0); // null anchor read
+ estimate += this._calculateEstimatedEmuBinaryReads(
+ stats.anchors, stats.nonAnchors, stats.nullAnchor, totals.esc);
+ });
+ });
+ return estimate;
+ }
+}
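+
+// Hypothetical usage sketch (field "first" and value "erwin" mirror the example in the
+// constructor comment above; the counts follow from the update rules documented on each method):
+//
+//   const tracker = new QEStateCollectionStatsTracker();
+//   tracker.updateStatsPostInsert("first", "erwin");   // 1 ESC non-anchor, 1 ECOC entry
+//   tracker.updateStatsPostInsert("first", "erwin");   // 2 ESC non-anchors, 2 ECOC entries
+//   tracker.updateStatsPostCompactForFields("first");  // 1 ESC anchor; non-anchor & ECOC counts reset
+//   const totals = tracker.calculateTotalStatsForFields("first");
+//   // totals.escAnchors == 1, totals.escNonAnchors == 0, totals.ecoc == 0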
diff --git a/jstests/fle2/modify_encrypted_collection.js b/jstests/fle2/modify_encrypted_collection.js
index 7cae63cdeeed0..a31d55f4ffb93 100644
--- a/jstests/fle2/modify_encrypted_collection.js
+++ b/jstests/fle2/modify_encrypted_collection.js
@@ -6,11 +6,6 @@
* requires_fcv_70
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
-
let dbTest = db.getSiblingDB('modify_encrypted_collection_db');
dbTest.basic.drop();
@@ -38,5 +33,4 @@ assert.commandFailedWithCode(dbTest.runCommand({collMod: "basic", validationLeve
ErrorCodes.BadValue);
assert.commandWorked(
- dbTest.runCommand({collMod: "basic", validationLevel: "strict", validationAction: "error"}));
-}());
+ dbTest.runCommand({collMod: "basic", validationLevel: "strict", validationAction: "error"}));
\ No newline at end of file
diff --git a/jstests/fle2/shard_collection.js b/jstests/fle2/shard_collection.js
index e6ce9da60372c..be7cbce121e47 100644
--- a/jstests/fle2/shard_collection.js
+++ b/jstests/fle2/shard_collection.js
@@ -5,14 +5,11 @@
* requires_fcv_70
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
+import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js";
// Passthrough workaround
if (!isMongos(db)) {
- return;
+ quit();
}
let dbName = 'shard_state';
@@ -38,4 +35,3 @@ assert.commandFailedWithCode(
db.adminCommand({shardCollection: 'shard_state.enxcol_.basic.ecc', key: {_id: 1}}), 6464401);
assert.commandFailedWithCode(
db.adminCommand({shardCollection: 'shard_state.enxcol_.basic.ecoc', key: {_id: 1}}), 6464401);
-}());
diff --git a/jstests/free_mon/free_mon_announce.js b/jstests/free_mon/free_mon_announce.js
index d78d0b58608d9..3c35e91cf40fa 100644
--- a/jstests/free_mon/free_mon_announce.js
+++ b/jstests/free_mon/free_mon_announce.js
@@ -14,20 +14,11 @@ const mongod = MongoRunner.runMongod({
assert.neq(mongod, null, 'mongod not running');
const admin = mongod.getDB('admin');
-function getConnectAnnounce() {
- // Capture message as it'd be presented to a user.
- clearRawMongoProgramOutput();
- const exitCode = runMongoProgram(
- 'mongo', '--port', mongod.port, '--eval', "shellHelper( 'show', 'freeMonitoring' );");
- assert.eq(exitCode, 0);
- return rawMongoProgramOutput();
-}
-
// state === 'enabled'.
admin.enableFreeMonitoring();
WaitForRegistration(mongod);
const reminder = "To see your monitoring data";
-assert.neq(getConnectAnnounce().search(reminder), -1, 'userReminder not found');
+assert(FreeMonGetStatus(mongod).userReminder.includes(reminder), 'userReminder not found');
// Cleanup.
MongoRunner.stopMongod(mongod);
diff --git a/jstests/free_mon/free_mon_register_cmd.js b/jstests/free_mon/free_mon_register_cmd.js
index 295908663a591..006d022e6b2ad 100644
--- a/jstests/free_mon/free_mon_register_cmd.js
+++ b/jstests/free_mon/free_mon_register_cmd.js
@@ -21,9 +21,7 @@ assert.neq(null, conn, 'mongod was unable to start up');
sleep(10 * 1000);
// Then verify that no registrations happened since we haven't runtime enabled yet.
-assert.eq('undecided',
- conn.getDB('admin').getFreeMonitoringStatus().state,
- "Initial state should be 'undecided'");
+assert.eq('undecided', FreeMonGetStatus(conn).state, "Initial state should be 'undecided'");
assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mon");
assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
@@ -31,8 +29,7 @@ assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"})
WaitForFreeMonServerStatusState(conn, 'enabled');
// The command should either time out or succeed after registration is complete
-const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
-assert.commandWorked(retStatus1);
+const retStatus1 = FreeMonGetStatus(conn);
assert.eq(retStatus1.state, "enabled", tojson(retStatus1));
const stats = mock_web.queryStats();
@@ -64,8 +61,7 @@ assert.soon(function() {
return regDoc.state == "disabled";
}, "Failed to unregister", 60 * 1000);
-const retStatus2 = conn.adminCommand({getFreeMonitoringStatus: 1});
-assert.commandWorked(retStatus2);
+const retStatus2 = FreeMonGetStatus(conn);
assert.eq(retStatus2.state, "disabled", tojson(retStatus2));
MongoRunner.stopMongod(conn);
diff --git a/jstests/free_mon/libs/free_mon.js b/jstests/free_mon/libs/free_mon.js
index 269f1e6530695..351f1a33e488f 100644
--- a/jstests/free_mon/libs/free_mon.js
+++ b/jstests/free_mon/libs/free_mon.js
@@ -288,7 +288,12 @@ function FreeMonGetStatus(conn) {
'use strict';
const admin = conn.getDB("admin");
- return assert.commandWorked(admin.runCommand({getFreeMonitoringStatus: 1}));
+ const reply = assert.commandWorked(admin.runCommand({getFreeMonitoringStatus: 1}));
+ // FreeMonitoring has been deprecated and reports 'disabled' regardless of status.
+ assert.eq(reply.state, 'disabled', 'FreeMonitoring has been deprecated');
+
+ // Use the "true" state tucked into the 'debug' field if it's available.
+ return reply.debug || reply;
}
/**
diff --git a/jstests/hooks/run_aggregate_metrics_background.js b/jstests/hooks/run_aggregate_metrics_background.js
deleted file mode 100644
index 32335852fcd61..0000000000000
--- a/jstests/hooks/run_aggregate_metrics_background.js
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Runs the $operationMetrics stage and ensures that all the expected fields are present.
- */
-
-'use strict';
-
-(function() {
-load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
-
-if (typeof db === 'undefined') {
- throw new Error(
- "Expected mongo shell to be connected a server, but global 'db' object isn't defined");
-}
-
-// Disable implicit sessions so FSM workloads that kill random sessions won't interrupt the
-// operations in this test that aren't resilient to interruptions.
-TestData.disableImplicitSessions = true;
-
-const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
-
-const aggregateMetricsBackground = function(host) {
- function verifyFields(doc) {
- const kTopLevelFields = [
- "docBytesWritten",
- "docUnitsWritten",
- "idxEntryBytesWritten",
- "idxEntryUnitsWritten",
- "totalUnitsWritten",
- "cpuNanos",
- "db",
- "primaryMetrics",
- "secondaryMetrics"
- ];
- const kReadFields = [
- "docBytesRead",
- "docUnitsRead",
- "idxEntryBytesRead",
- "idxEntryUnitsRead",
- "keysSorted",
- "docUnitsReturned"
- ];
-
- for (let key of kTopLevelFields) {
- assert(doc.hasOwnProperty(key), "The metrics output is missing the property: " + key);
- }
- let primaryMetrics = doc.primaryMetrics;
- for (let key of kReadFields) {
- assert(primaryMetrics.hasOwnProperty(key),
- "The metrics output is missing the property: primaryMetrics." + key);
- }
- let secondaryMetrics = doc.secondaryMetrics;
- for (let key of kReadFields) {
- assert(secondaryMetrics.hasOwnProperty(key),
- "The metrics output is missing the property: secondaryMetrics." + key);
- }
- }
-
- let conn = new Mongo(host);
- conn.setSecondaryOk();
-
- assert.neq(
- null, conn, "Failed to connect to host '" + host + "' for background metrics collection");
-
- // Filter out arbiters.
- if (conn.adminCommand({isMaster: 1}).arbiterOnly) {
- print("Skipping background aggregation against test node: " + host +
- " because it is an arbiter and has no data.");
- return;
- }
-
- let db = conn.getDB("admin");
- let clearMetrics = Math.random() < 0.9 ? false : true;
- print("Running $operationMetrics with {clearMetrics: " + clearMetrics + "} on host: " + host);
- const cursor = db.aggregate([{$operationMetrics: {clearMetrics: clearMetrics}}]);
- while (cursor.hasNext()) {
- let doc = cursor.next();
- try {
- verifyFields(doc);
- } catch (e) {
- print("caught exception while verifying that all expected fields are in the metrics " +
- "output: " + tojson(doc));
- throw (e);
- }
- }
-};
-
-// This file is run continuously and is very fast so we want to impose some kind of rate limiting
-// which is why we sleep for 1 second here. This sleep is here rather than in
-// aggregate_metrics_background.py because the background job that file uses is designed to be run
-// continuously so it is easier and cleaner to just sleep here.
-sleep(1000);
-if (topology.type === Topology.kStandalone) {
- try {
- aggregateMetricsBackground(topology.mongod);
- } catch (e) {
- print("background aggregate metrics against the standalone failed");
- throw e;
- }
-} else if (topology.type === Topology.kReplicaSet) {
- for (let replicaMember of topology.nodes) {
- try {
- aggregateMetricsBackground(replicaMember);
- } catch (e) {
- print("background aggregate metrics was not successful against all replica set " +
- "members");
- throw e;
- }
- }
-} else {
- throw new Error("Unsupported topology configuration: " + tojson(topology));
-}
-})();
diff --git a/jstests/hooks/run_analyze_shard_key_background.js b/jstests/hooks/run_analyze_shard_key_background.js
index 625eb6bc59e6d..96f361b45d28f 100644
--- a/jstests/hooks/run_analyze_shard_key_background.js
+++ b/jstests/hooks/run_analyze_shard_key_background.js
@@ -153,9 +153,15 @@ function getLatestSampleQueryDocument() {
* of the resulting metrics.
*/
function analyzeShardKey(ns, shardKey, indexKey) {
- jsTest.log(`Analyzing shard keys ${tojsononeline({ns, shardKey, indexKey})}`);
-
- const res = conn.adminCommand({analyzeShardKey: ns, key: shardKey});
+ const cmdObj = {analyzeShardKey: ns, key: shardKey};
+ const rand = Math.random();
+ if (rand < 0.25) {
+ cmdObj.sampleRate = Math.random() * 0.5 + 0.5;
+ } else if (rand < 0.5) {
+ cmdObj.sampleSize = NumberLong(AnalyzeShardKeyUtil.getRandInteger(1000, 10000));
+ }
+ jsTest.log(`Analyzing shard keys ${tojsononeline({shardKey, indexKey, cmdObj})}`);
+ const res = conn.adminCommand(cmdObj);
if (res.code == ErrorCodes.BadValue || res.code == ErrorCodes.IllegalOperation ||
res.code == ErrorCodes.NamespaceNotFound ||
@@ -204,15 +210,37 @@ function analyzeShardKey(ns, shardKey, indexKey) {
tojsononeline(res)}`);
return res;
}
+ if (res.code == 7559401) {
+ print(`Failed to analyze the shard key because one of the shards fetched the split ` +
+ `point documents after the TTL deletions had started. ${tojsononeline(res)}`);
+ return res;
+ }
+ if (res.code == 7588600) {
+ print(`Failed to analyze the shard key because the document for one of the most common ` +
+ `shard key values got deleted while the command was running. ${tojsononeline(res)}`);
+ return res;
+ }
+ if (res.code == 7826501) {
+ print(`Failed to analyze the shard key because $collStats indicates that the collection ` +
+ `is empty. ${tojsononeline(res)}`);
+ return res;
+ }
+ if (res.code == 7826505) {
+ print(`Failed to analyze the shard key because the collection becomes empty during the ` +
+ `step for calculating the monotonicity metrics. ${tojsononeline(res)}`);
+ return res;
+ }
+ if (res.code == 7826506 || res.code == 7826507) {
+ print(`Failed to analyze the shard key because the collection becomes empty during the ` +
+ `step for calculating the cardinality and frequency metrics. ${tojsononeline(res)}`);
+ return res;
+ }
assert.commandWorked(res);
jsTest.log(`Finished analyzing the shard key: ${tojsononeline(res)}`);
- // The response should only contain the "numDocs" field if it also contains the fields about the
- // characteristics of the shard key (e.g. "numDistinctValues" and "mostCommonValues") since the
- // number of documents is just a supporting metric for those metrics.
- if (res.hasOwnProperty("numDocs")) {
- AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res);
+ if (res.hasOwnProperty("keyCharacteristics")) {
+ AnalyzeShardKeyUtil.validateKeyCharacteristicsMetrics(res.keyCharacteristics);
} else {
AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res);
}
diff --git a/jstests/hooks/run_check_metadata_consistency.js b/jstests/hooks/run_check_metadata_consistency.js
index 50ca85f3f9a54..80e410d335d49 100644
--- a/jstests/hooks/run_check_metadata_consistency.js
+++ b/jstests/hooks/run_check_metadata_consistency.js
@@ -1,8 +1,5 @@
-'use strict';
-
-(function() {
-load('jstests/libs/check_metadata_consistency_helpers.js'); // For MetadataConsistencyChecker.
-load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
+import {MetadataConsistencyChecker} from "jstests/libs/check_metadata_consistency_helpers.js";
+load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.
assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a server?');
@@ -16,7 +13,7 @@ assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a
jsTest.log(
`Aborted metadata consistency check due to retriable error during topology discovery: ${
e}`);
- return;
+ quit();
} else {
throw e;
}
@@ -26,4 +23,3 @@ assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a
const mongos = db.getMongo();
MetadataConsistencyChecker.run(mongos);
-})();
diff --git a/jstests/hooks/run_check_routing_table_consistency.js b/jstests/hooks/run_check_routing_table_consistency.js
index 305ac9550bc28..bfcbf4f12ffb5 100644
--- a/jstests/hooks/run_check_routing_table_consistency.js
+++ b/jstests/hooks/run_check_routing_table_consistency.js
@@ -1,7 +1,6 @@
-'use strict';
-
-(function() {
-load('jstests/libs/check_routing_table_consistency_helpers.js'); // For check implementation.
+import {
+ RoutingTableConsistencyChecker
+} from "jstests/libs/check_routing_table_consistency_helpers.js";
load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a server?');
@@ -15,4 +14,3 @@ if (topology.type !== Topology.kShardedCluster) {
tojson(topology));
}
RoutingTableConsistencyChecker.run(db.getMongo());
-})();
diff --git a/jstests/hooks/run_fcv_upgrade_downgrade_background.js b/jstests/hooks/run_fcv_upgrade_downgrade_background.js
new file mode 100644
index 0000000000000..f1c5e02c8ecef
--- /dev/null
+++ b/jstests/hooks/run_fcv_upgrade_downgrade_background.js
@@ -0,0 +1,92 @@
+/**
+ * Runs FCV upgrade/downgrade in the background.
+ *
+ * May need more checks; see: jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js
+ */
+'use strict';
+
+(function() {
+load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
+load('jstests/libs/parallelTester.js'); // For Thread.
+
+if (typeof db === 'undefined') {
+ throw new Error(
+ "Expected mongo shell to be connected a server, but global 'db' object isn't defined");
+}
+
+// Disable implicit sessions so FSM workloads that kill random sessions won't interrupt the
+// operations in this test that aren't resilient to interruptions.
+TestData.disableImplicitSessions = true;
+
+const conn = db.getMongo();
+
+const sendFCVUpDown = function(ver) {
+ try {
+ print("Running adminCommand({setFeatureCompatibilityVersion: " + ver + "}");
+ const res = conn.adminCommand({setFeatureCompatibilityVersion: ver});
+ assert.commandWorked(res);
+ } catch (e) {
+ if (e.code === 332) {
+ // Cannot downgrade the cluster as collection xxx has 'encryptedFields' with range
+ // indexes.
+ jsTestLog('setFCV: Can not downgrade');
+ return;
+ }
+ if (e.code === 5147403) {
+ // Invalid FCV transition (e.g. lastContinuous -> lastLTS).
+ jsTestLog('setFCV: Invalid transition');
+ return;
+ }
+ if (e.code === 7428200) {
+ // Cannot upgrade FCV if a previous FCV downgrade stopped in the middle of cleaning
+ // up internal server metadata.
+ assert.eq(latestFCV, ver);
+ jsTestLog(
+ 'setFCV: Cannot upgrade FCV if a previous FCV downgrade stopped in the middle \
+ of cleaning up internal server metadata');
+ return;
+ }
+ throw e;
+ }
+};
+
+Random.setRandomSeed();
+let maxSleep = 5000; // 5 sec.
+let currSleep = 10; // Start at 10ms.
+
+// Get time interval to sleep in ms.
+// Value returned will be between currSleep and 2 * currSleep.
+// Also increase currSleep in order to sleep for longer and longer intervals.
+// This type of exponential backoff ensures that we run (several times) for short tests,
+// but don't cause long tests to time out.
+const getRandTimeIncInterval = function() {
+ let ret = Random.randInt(currSleep) + currSleep;
+ currSleep *= 4;
+ return ret;
+};
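+
+// Worked example of the backoff above (starting from currSleep = 10ms): successive calls
+// return a value in [10, 20)ms, then [40, 80)ms, [160, 320)ms, [640, 1280)ms and
+// [2560, 5120)ms, after which currSleep (10240ms) exceeds maxSleep and the loop below exits.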
+
+// Only go through the loop a few times, sleeping (by an increasing duration) between sendFCV
+// commands. This way even short-duration tests experience a few FCV changes, while long-running
+// tests do not time out (which could happen if the sleep duration were a fixed small value).
+while (currSleep <= maxSleep) {
+ // downgrade FCV
+ sleep(getRandTimeIncInterval());
+ sendFCVUpDown(lastLTSFCV);
+
+ // upgrade FCV
+ sleep(getRandTimeIncInterval());
+ sendFCVUpDown(latestFCV);
+ // At this point FCV is back to latestFCV.
+
+ if (lastLTSFCV !== lastContinuousFCV) {
+ // downgrade FCV
+ sleep(getRandTimeIncInterval());
+ sendFCVUpDown(lastContinuousFCV);
+
+ // upgrade FCV
+ sleep(getRandTimeIncInterval());
+ sendFCVUpDown(latestFCV);
+ // At this point FCV is back to latestFCV.
+ }
+}
+})();
diff --git a/jstests/hooks/validate_collections.js b/jstests/hooks/validate_collections.js
index 1ef1a9a761aea..70459c5914106 100644
--- a/jstests/hooks/validate_collections.js
+++ b/jstests/hooks/validate_collections.js
@@ -70,6 +70,13 @@ function CollectionValidator() {
print('Skipping collection validation for ' + coll.getFullName() +
' since collection was not found');
continue;
+ } else if (res.codeName === "CommandNotSupportedOnView") {
+ // Even though we pass a filter to getCollectionInfos() to only fetch
+ // collections, nothing is preventing the collection from being dropped and
+ // recreated as a view.
+ print('Skipping collection validation for ' + coll.getFullName() +
+ ' as it is a view');
+ continue;
}
const host = db.getMongo().host;
print('Collection validation failed on host ' + host +
diff --git a/jstests/libs/analyze_plan.js b/jstests/libs/analyze_plan.js
index 308119d5262d5..4fc8cb5cc65af 100644
--- a/jstests/libs/analyze_plan.js
+++ b/jstests/libs/analyze_plan.js
@@ -7,7 +7,7 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
/**
* Returns a sub-element of the 'queryPlanner' explain output which represents a winning plan.
*/
-function getWinningPlan(queryPlanner) {
+export function getWinningPlan(queryPlanner) {
// The 'queryPlan' format is used when the SBE engine is turned on. If this field is present,
// it will hold a serialized winning plan, otherwise it will be stored in the 'winningPlan'
// field itself.
@@ -18,7 +18,7 @@ function getWinningPlan(queryPlanner) {
/**
* Returns an element of explain output which represents a rejected candidate plan.
*/
-function getRejectedPlan(rejectedPlan) {
+export function getRejectedPlan(rejectedPlan) {
// The 'queryPlan' format is used when the SBE engine is turned on. If this field is present,
// it will hold a serialized winning plan, otherwise it will be stored in the 'rejectedPlan'
// element itself.
@@ -28,7 +28,7 @@ function getRejectedPlan(rejectedPlan) {
/**
* Returns a sub-element of the 'cachedPlan' explain output which represents a query plan.
*/
-function getCachedPlan(cachedPlan) {
+export function getCachedPlan(cachedPlan) {
// The 'queryPlan' format is used when the SBE engine is turned on. If this field is present, it
// will hold a serialized cached plan, otherwise it will be stored in the 'cachedPlan' field
// itself.
@@ -40,7 +40,7 @@ function getCachedPlan(cachedPlan) {
* subdocuments whose stage is 'stage'. Returns an empty array if the plan does not have the
* requested stage. if 'stage' is 'null' returns all the stages in 'root'.
*/
-function getPlanStages(root, stage) {
+export function getPlanStages(root, stage) {
var results = [];
if (root.stage === stage || stage === undefined) {
@@ -99,7 +99,7 @@ function getPlanStages(root, stage) {
* Given the root stage of explain's JSON representation of a query plan ('root'), returns a list of
* all the stages in 'root'.
*/
-function getAllPlanStages(root) {
+export function getAllPlanStages(root) {
return getPlanStages(root);
}
@@ -108,7 +108,7 @@ function getAllPlanStages(root) {
* subdocument with its stage as 'stage'. Returns null if the plan does not have such a stage.
* Asserts that no more than one stage is a match.
*/
-function getPlanStage(root, stage) {
+export function getPlanStage(root, stage) {
var planStageList = getPlanStages(root, stage);
if (planStageList.length === 0) {
@@ -124,7 +124,7 @@ function getPlanStage(root, stage) {
/**
* Returns the set of rejected plans from the given replset or sharded explain output.
*/
-function getRejectedPlans(root) {
+export function getRejectedPlans(root) {
if (root.queryPlanner.winningPlan.hasOwnProperty("shards")) {
const rejectedPlans = [];
for (let shard of root.queryPlanner.winningPlan.shards) {
@@ -141,7 +141,7 @@ function getRejectedPlans(root) {
* Given the root stage of explain's JSON representation of a query plan ('root'), returns true if
* the query planner reports at least one rejected alternative plan, and false otherwise.
*/
-function hasRejectedPlans(root) {
+export function hasRejectedPlans(root) {
function sectionHasRejectedPlans(explainSection) {
assert(explainSection.hasOwnProperty("rejectedPlans"), tojson(explainSection));
return explainSection.rejectedPlans.length !== 0;
@@ -184,7 +184,7 @@ function hasRejectedPlans(root) {
/**
* Returns an array of execution stages from the given replset or sharded explain output.
*/
-function getExecutionStages(root) {
+export function getExecutionStages(root) {
if (root.hasOwnProperty("executionStats") &&
root.executionStats.executionStages.hasOwnProperty("shards")) {
const executionStages = [];
@@ -216,7 +216,7 @@ function getExecutionStages(root) {
* Returns an empty array if the plan does not have the requested stage. Asserts that agg explain
* structure matches expected format.
*/
-function getAggPlanStages(root, stage, useQueryPlannerSection = false) {
+export function getAggPlanStages(root, stage, useQueryPlannerSection = false) {
let results = [];
function getDocumentSources(docSourceArray) {
@@ -309,7 +309,7 @@ function getAggPlanStages(root, stage, useQueryPlannerSection = false) {
* If 'useQueryPlannerSection' is set to 'true', the 'queryPlanner' section of the explain output
* will be used to lookup the given 'stage', even if 'executionStats' section is available.
*/
-function getAggPlanStage(root, stage, useQueryPlannerSection = false) {
+export function getAggPlanStage(root, stage, useQueryPlannerSection = false) {
let planStageList = getAggPlanStages(root, stage, useQueryPlannerSection);
if (planStageList.length === 0) {
@@ -329,7 +329,7 @@ function getAggPlanStage(root, stage, useQueryPlannerSection = false) {
* explain plans, and it can search for a query planner stage like "FETCH" or an agg stage like
* "$group."
*/
-function aggPlanHasStage(root, stage) {
+export function aggPlanHasStage(root, stage) {
return getAggPlanStages(root, stage).length > 0;
}
@@ -340,7 +340,7 @@ function aggPlanHasStage(root, stage) {
* Expects that the stage appears once or zero times per node. If the stage appears more than once
* on one node's query plan, an error will be thrown.
*/
-function planHasStage(db, root, stage) {
+export function planHasStage(db, root, stage) {
const matchingStages = getPlanStages(root, stage);
// If we are executing against a mongos, we may get more than one occurrence of the stage.
@@ -360,7 +360,7 @@ function planHasStage(db, root, stage) {
* Given the root stage of explain's BSON representation of a query plan ('root'),
* returns true if the plan is index only. Otherwise returns false.
*/
-function isIndexOnly(db, root) {
+export function isIndexOnly(db, root) {
return !planHasStage(db, root, "FETCH") && !planHasStage(db, root, "COLLSCAN");
}
@@ -368,7 +368,7 @@ function isIndexOnly(db, root) {
* Returns true if the BSON representation of a plan rooted at 'root' is using
* an index scan, and false otherwise.
*/
-function isIxscan(db, root) {
+export function isIxscan(db, root) {
return planHasStage(db, root, "IXSCAN");
}
@@ -376,7 +376,7 @@ function isIxscan(db, root) {
* Returns true if the BSON representation of a plan rooted at 'root' is using
* the idhack fast path, and false otherwise.
*/
-function isIdhack(db, root) {
+export function isIdhack(db, root) {
return planHasStage(db, root, "IDHACK");
}
@@ -384,11 +384,11 @@ function isIdhack(db, root) {
* Returns true if the BSON representation of a plan rooted at 'root' is using
* a collection scan, and false otherwise.
*/
-function isCollscan(db, root) {
+export function isCollscan(db, root) {
return planHasStage(db, root, "COLLSCAN");
}
-function isClusteredIxscan(db, root) {
+export function isClusteredIxscan(db, root) {
return planHasStage(db, root, "CLUSTERED_IXSCAN");
}
@@ -396,7 +396,7 @@ function isClusteredIxscan(db, root) {
* Returns true if the BSON representation of a plan rooted at 'root' is using the aggregation
* framework, and false otherwise.
*/
-function isAggregationPlan(root) {
+export function isAggregationPlan(root) {
if (root.hasOwnProperty("shards")) {
const shards = Object.keys(root.shards);
return shards.reduce(
@@ -410,7 +410,7 @@ function isAggregationPlan(root) {
* Returns true if the BSON representation of a plan rooted at 'root' is using just the query layer,
* and false otherwise.
*/
-function isQueryPlan(root) {
+export function isQueryPlan(root) {
if (root.hasOwnProperty("shards")) {
const shards = Object.keys(root.shards);
return shards.reduce(
@@ -424,7 +424,7 @@ function isQueryPlan(root) {
* Get the "chunk skips" for a single shard. Here, "chunk skips" refer to documents excluded by the
* shard filter.
*/
-function getChunkSkipsFromShard(shardPlan, shardExecutionStages) {
+export function getChunkSkipsFromShard(shardPlan, shardExecutionStages) {
const shardFilterPlanStage = getPlanStage(getWinningPlan(shardPlan), "SHARDING_FILTER");
if (!shardFilterPlanStage) {
return 0;
@@ -452,7 +452,7 @@ function getChunkSkipsFromShard(shardPlan, shardExecutionStages) {
* Get the sum of "chunk skips" from all shards. Here, "chunk skips" refer to documents excluded by
* the shard filter.
*/
-function getChunkSkipsFromAllShards(explainResult) {
+export function getChunkSkipsFromAllShards(explainResult) {
const shardPlanArray = explainResult.queryPlanner.winningPlan.shards;
const shardExecutionStagesArray = explainResult.executionStats.executionStages.shards;
assert.eq(shardPlanArray.length, shardExecutionStagesArray.length, explainResult);
@@ -468,7 +468,7 @@ function getChunkSkipsFromAllShards(explainResult) {
* Given explain output at executionStats level verbosity, confirms that the root stage is COUNT or
* RECORD_STORE_FAST_COUNT and that the result of the count is equal to 'expectedCount'.
*/
-function assertExplainCount({explainResults, expectedCount}) {
+export function assertExplainCount({explainResults, expectedCount}) {
const execStages = explainResults.executionStats.executionStages;
// If passed through mongos, then the root stage should be the mongos SINGLE_SHARD stage or
@@ -500,7 +500,7 @@ function assertExplainCount({explainResults, expectedCount}) {
/**
* Verifies that a given query uses an index and is covered when used in a count command.
*/
-function assertCoveredQueryAndCount({collection, query, project, count}) {
+export function assertCoveredQueryAndCount({collection, query, project, count}) {
let explain = collection.find(query, project).explain();
assert(isIndexOnly(db, getWinningPlan(explain.queryPlanner)),
"Winning plan was not covered: " + tojson(explain.queryPlanner.winningPlan));
@@ -517,7 +517,7 @@ function assertCoveredQueryAndCount({collection, query, project, count}) {
* present exactly once in the plan returned. When 'stagesNotExpected' array is passed, also
* verifies that none of those stages are present in the explain() plan.
*/
-function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNotExpected}) {
+export function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNotExpected}) {
const plan = assert.commandWorked(coll.runCommand({explain: cmdObj}));
const winningPlan = getWinningPlan(plan.queryPlanner);
for (let expectedStage of expectedStages) {
@@ -534,7 +534,7 @@ function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNo
/**
* Utility to obtain a value from 'explainRes' using 'getValueCallback'.
*/
-function getFieldValueFromExplain(explainRes, getValueCallback) {
+export function getFieldValueFromExplain(explainRes, getValueCallback) {
assert(explainRes.hasOwnProperty("queryPlanner"), explainRes);
const plannerOutput = explainRes.queryPlanner;
const fieldValue = getValueCallback(plannerOutput);
@@ -545,7 +545,7 @@ function getFieldValueFromExplain(explainRes, getValueCallback) {
/**
* Get the 'planCacheKey' from 'explainRes'.
*/
-function getPlanCacheKeyFromExplain(explainRes, db) {
+export function getPlanCacheKeyFromExplain(explainRes, db) {
return getFieldValueFromExplain(explainRes, function(plannerOutput) {
return FixtureHelpers.isMongos(db) && plannerOutput.hasOwnProperty("winningPlan") &&
plannerOutput.winningPlan.hasOwnProperty("shards")
@@ -557,7 +557,7 @@ function getPlanCacheKeyFromExplain(explainRes, db) {
/**
* Get the 'queryHash' from 'explainRes'.
*/
-function getQueryHashFromExplain(explainRes, db) {
+export function getQueryHashFromExplain(explainRes, db) {
return getFieldValueFromExplain(explainRes, function(plannerOutput) {
return FixtureHelpers.isMongos(db) ? plannerOutput.winningPlan.shards[0].queryHash
: plannerOutput.queryHash;
@@ -568,7 +568,7 @@ function getQueryHashFromExplain(explainRes, db) {
* Helper to run a explain on the given query shape and get the "planCacheKey" from the explain
* result.
*/
-function getPlanCacheKeyFromShape({
+export function getPlanCacheKeyFromShape({
query = {},
projection = {},
sort = {},
@@ -588,7 +588,7 @@ function getPlanCacheKeyFromShape({
* Helper to run a explain on the given pipeline and get the "planCacheKey" from the explain
* result.
*/
-function getPlanCacheKeyFromPipeline(pipeline, collection, db) {
+export function getPlanCacheKeyFromPipeline(pipeline, collection, db) {
const explainRes = assert.commandWorked(collection.explain().aggregate(pipeline));
return getPlanCacheKeyFromExplain(explainRes, db);
@@ -597,7 +597,7 @@ function getPlanCacheKeyFromPipeline(pipeline, collection, db) {
/**
* Given the winning query plan, flatten query plan tree into a list of plan stage names.
*/
-function flattenQueryPlanTree(winningPlan) {
+export function flattenQueryPlanTree(winningPlan) {
let stages = [];
while (winningPlan) {
stages.push(winningPlan.stage);
@@ -606,3 +606,37 @@ function flattenQueryPlanTree(winningPlan) {
stages.reverse();
return stages;
}
+
+/**
+ * Assert that a command plan has no FETCH stage, or that if a FETCH stage is present, it has no filter.
+ */
+export function assertNoFetchFilter({coll, cmdObj}) {
+ const plan = assert.commandWorked(coll.runCommand({explain: cmdObj}));
+ const winningPlan = getWinningPlan(plan.queryPlanner);
+ const fetch = getPlanStage(winningPlan, "FETCH");
+ assert((fetch === null || !fetch.hasOwnProperty("filter")),
+ "Unexpected fetch: " + tojson(fetch));
+ return winningPlan;
+}
+
+/**
+ * Assert that a find plan has a FETCH stage with the expected filter and returns the specified number of
+ * results.
+ */
+export function assertFetchFilter({coll, predicate, expectedFilter, nReturned}) {
+ const exp = coll.find(predicate).explain("executionStats");
+ const plan = getWinningPlan(exp.queryPlanner);
+ const fetch = getPlanStage(plan, "FETCH");
+ assert(fetch !== null, "Missing FETCH stage " + tojson(plan));
+ assert(fetch.hasOwnProperty("filter"),
+ "Expected filter in the fetch stage, got " + tojson(fetch));
+ assert.eq(expectedFilter,
+ fetch.filter,
+ "Expected filter " + tojson(expectedFilter) + " got " + tojson(fetch.filter));
+
+ if (nReturned !== null) {
+ assert.eq(exp.executionStats.nReturned,
+ nReturned,
+ "Expected " + nReturned + " documents, got " + exp.executionStats.nReturned);
+ }
+}
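The two assertions added above pair naturally in covered-versus-residual-predicate tests. Below is a minimal usage sketch, assuming a scratch collection with a single-field index; the collection name, field names, and the exact shape explain uses for the residual filter are illustrative, not part of the patch, and the import of the two helpers from the library file modified above is omitted.

```js
// Usage sketch only; assumes assertNoFetchFilter/assertFetchFilter are imported from the
// library file shown in the hunk above.
const coll = db.fetch_filter_example;
coll.drop();
assert.commandWorked(coll.insert([{a: 1, b: 1}, {a: 2, b: 2}]));
assert.commandWorked(coll.createIndex({a: 1}));

// The predicate is fully answered by the index bounds, so any FETCH stage must carry no filter.
assertNoFetchFilter({coll: coll, cmdObj: {find: coll.getName(), filter: {a: 1}}});

// The residual predicate on 'b' has to be applied by a FETCH stage; the filter shape shown here
// ({b: {$eq: 1}}) is an assumption about how explain renders it.
assertFetchFilter(
    {coll: coll, predicate: {a: 1, b: 1}, expectedFilter: {b: {$eq: 1}}, nReturned: 1});
```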
diff --git a/jstests/libs/auto_retry_transaction_in_sharding.js b/jstests/libs/auto_retry_transaction_in_sharding.js
index f81a366970a2d..7ccac0b7d5a66 100644
--- a/jstests/libs/auto_retry_transaction_in_sharding.js
+++ b/jstests/libs/auto_retry_transaction_in_sharding.js
@@ -94,4 +94,4 @@ var {
retryOnceOnTransientOnMongos,
retryOnceOnTransientAndRestartTxnOnMongos
};
-})();
\ No newline at end of file
+})();
diff --git a/jstests/libs/badSAN.pem b/jstests/libs/badSAN.pem
index b36e5a4c72080..9e60d23d52162 100644
--- a/jstests/libs/badSAN.pem
+++ b/jstests/libs/badSAN.pem
@@ -3,53 +3,53 @@
#
# Certificate with an otherwise permissible CommonName, but with an unmatchable SubjectAlternateName.
-----BEGIN CERTIFICATE-----
-MIIDwjCCAqqgAwIBAgIEbr2RhTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDwjCCAqqgAwIBAgIEJiElLDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBvMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM2WhcNMjUwOTEwMTQyODM2WjBvMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDESMBAGA1UEAwwJ
-MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3q91q4t8
-/v442v1IaL20H8mcxaNh2jE1X1+mPz9La1tKsjDN3BwIOt2PamMDUdJhT9YL0MDF
-h+gUaPcAvJA8KMMGwnzrzbCSI2SkWyQ4+QKyDeKnNZ1sn/EmZNruHZ+meiXu4EtE
-t0HrFRIgEYFZGj6iMhkQUfDan6Aed1eBgTrjHr61pTywCw5SJaDlI78uuZQvs7KB
-UpsEHJDdvyZvK8TxmllL+mLqcP1D6aLmlNwC6Pnzc8wWrSuqI9v+YP1YsGBEyi/l
-a/+q7QOOWcaruRXqH/nj4KkRixGBpRQy80OQse68pmtLA4FAJateFKtXYD6Vh/g4
-jI9FE1PmAni/vQIDAQABo2EwXzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
-HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUBcxltaZPDRkxtSGwZvfNtbMfOMsw
-EQYDVR0RBAowCIIGYmFkU0FOMA0GCSqGSIb3DQEBCwUAA4IBAQATShkmAdj1+FkA
-BdACAz23f+V/ihcBHtCFpfi4SgS+vzmzS/BbP6pXD+DS78cpLfCtS0c4xJFMgyPo
-82/Ii0CsK7Jm5jT/ZWg02CW6ecaSVmAhzlVybQpjQ6qrXP1zoqJeyyOhTl1Sfcbu
-WFOkDh3d0SEuCctDne5pUGIHjZ3YDKiOmLPYNDUDKB3DX5P6t5PnqtPHpYPE3zG2
-6X96xs3OqpER0vUWHKSe1ofnVr9YmDt7mOYbOaa6MU1WUPedZ5BR0mOVwsxx7z5i
-QdVARPyZqSd0BCIub9cwTo9cqPPBzSt/MBd5brMR7NwdWhIAHIhYGvT+437ZXjBq
-RMvCdM5m
+MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyxUzmqbz
+gz667HetJB/MU8n3mCouCBfBLGJBTjkA7Z+iu//fTDU4t9BSqg+CC8viwuWHxmW3
+Iv0m6XYKLtYCJLl2tDdSkpn/3N/OdfybN4KncCnUVqQ2kK5IR4gvWMe3w2wqt1mL
+Hxe8SFubXwbLv4soIpHtL7tx+hVOunKE0ANBZO3jijxJF7xz1QA1W/mkCAnrapva
+JeZHmHcltIketndVNYziOKIimeiuQ9tZeW1OoH4galkdOAnd3CdEPHh4ZLDePu4n
+6kor92Siav3PK073CmxbUl0H0PEP7d4HwiR/M/eq+Eqh9ykRVAwr3vJhLKuVcgen
+t0ZrlutxCVmqSwIDAQABo2EwXzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
+HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUgVc7HomlduDjgc2G95sshVuWTE4w
+EQYDVR0RBAowCIIGYmFkU0FOMA0GCSqGSIb3DQEBCwUAA4IBAQCV8Wacy4CjWV1S
+VyfeZ7OeNw/ggF6vaZrJbYeI69GtHU38wnklviKeG5zkBVv6MPrw8bkqM28Jtyjg
++gXc0y0Po+m6+56mnfJAEUwtZbKQ/IXSPh0NcETO98Dk4rJyhk+/PQYtUqL7lJ/T
+7zaQG8MgyiILkxQqRDqAcGu3nF4UBujFBNKqF3p2yC42pL9TYrgbmuUiWN7Qpyk0
+3ZqJivlUHBOX3D5K8xf+RUwHUquVFOZfX+U68yA/mZ3fsFNBhzhZowVgcS3NMFoT
+xtnZRku+KHoodwVwyyIeTuFlpzekRDafFoQsW06rXo+I2Wl7pLfA5YAAbH53eZCX
+8YbCofVu
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDer3Wri3z+/jja
-/UhovbQfyZzFo2HaMTVfX6Y/P0trW0qyMM3cHAg63Y9qYwNR0mFP1gvQwMWH6BRo
-9wC8kDwowwbCfOvNsJIjZKRbJDj5ArIN4qc1nWyf8SZk2u4dn6Z6Je7gS0S3QesV
-EiARgVkaPqIyGRBR8NqfoB53V4GBOuMevrWlPLALDlIloOUjvy65lC+zsoFSmwQc
-kN2/Jm8rxPGaWUv6Yupw/UPpouaU3ALo+fNzzBatK6oj2/5g/ViwYETKL+Vr/6rt
-A45Zxqu5Feof+ePgqRGLEYGlFDLzQ5Cx7ryma0sDgUAlq14Uq1dgPpWH+DiMj0UT
-U+YCeL+9AgMBAAECggEAKt0tUkr0UYUP3p9Ye8jgTioZwjVT1ZMZSfV4Y75EPzMA
-atJYHeFLeEFDpN/QOMS9NIfoGFOy8hkrLJU3EBTXb6UyvOcc4Kf3SVbLCDwtt0jc
-iy2wr/JQgW0CzfESWqNrgSGiHIyAeqH2pUSq5ZO3WgTqZahLaupouscwdhpi9EPM
-fhPmuiFnJ+W8NA9DWRBiEZh+DyYFBad4+Dyk/R0OAAh0m5kgkFEVyOmE0t4APwiw
-55ePv7bAR1QDbL+Q5bKV3vwr+gsylNjwZexYxAJQJZD2PyLjsftUH7E1W9RYM0Om
-duSVEoXJZFCWtosx07B4pQHb6ryqGUS76ax0J9ZXIQKBgQD7t1fuJtKgj9Nagcij
-J2Hz9ws7NZx/PLew1Br8a8j96jYHNfJvEG6gDVYoVW/qKO/UJIvdj557WSnVPDEJ
-4h9Bdc3MV0IhiLpi4m63pHUKwEdiCooUcJm2OtQSVd3FXnNSJpjQrN5jg4VXe1kg
-Bjok0G5YqEMOfd0uZcWrW9uApQKBgQDieaMdfwU8z32G/v4oSa/m2PfN11BjdZn5
-AQkvRvHTIP+hwunAX1uu0iTE6y4IO0fjrSTmpJbOsFslwiAYXmikUQk1F1I+s7FM
-NEoQb3zueXbOs9K+WnepgMuMOgMbDm8qqoaGYmikU7mHZZb34nC0JSRA66DaoXsC
-RmCDAsa/OQKBgQD2JKVRdplE9R/CQ2NFV/+LJ6rN7XDpYyBlRCRXbbcPxPWsO26k
-eLcUv7Xenx1fJ0TOeCZlNEnPaLNllwSFG59gzae/CBzc2e0ZQT7vSVxCdR/YmWHI
-9wr4jbJPc7P3ipLOZHvOoxycx0Ge+DmA/VXiJgehnnhkNWQSOOcA2ERfGQKBgQC9
-W46yvt2WNrZyBQpkjRfyID8xcHHadx3CQMd5LAxNxy12Bw6CtjhmN3Ze33ShlU9K
-Yh6UadFeB75uF53WQjmkCc11AobVvlDjsmSq0UzX598afOgcGHAs3W9TU739BViV
-h/bqraVooEhjmOFdaYtqVBO35EueAZ5kDIvtfojGoQKBgED/n9nPsVLjuDvgNyux
-8fYlfGnIeQSoOnasOvsWh+hsw5MIAaFknUZ27rYB6BblJLrLAbUCVEeOZsyWtJWm
-Kp3IlXPUz7cYt0aCFLcDhCSQevfCDzJ+8dexAMzCkbrdcD5Wl3xWjRmsgw67YtzS
-dy3/1MEdNReJZZoJv80le9Ty
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLFTOapvODPrrs
+d60kH8xTyfeYKi4IF8EsYkFOOQDtn6K7/99MNTi30FKqD4ILy+LC5YfGZbci/Sbp
+dgou1gIkuXa0N1KSmf/c3851/Js3gqdwKdRWpDaQrkhHiC9Yx7fDbCq3WYsfF7xI
+W5tfBsu/iygike0vu3H6FU66coTQA0Fk7eOKPEkXvHPVADVb+aQICetqm9ol5keY
+dyW0iR62d1U1jOI4oiKZ6K5D21l5bU6gfiBqWR04Cd3cJ0Q8eHhksN4+7ifqSiv3
+ZKJq/c8rTvcKbFtSXQfQ8Q/t3gfCJH8z96r4SqH3KRFUDCve8mEsq5VyB6e3RmuW
+63EJWapLAgMBAAECggEARWcYfKjDnb3Jadi4bhkD3jquh+0aUB+2H7HweYgXnh2p
+IrCeIlAp8v+Z0I5+NgPWmh6RGSdXv/hd3Pk/H6R5lQ1g2NPzPzCr9VLzHvVWIlFf
+YYCaFYYHM8ir4O5Up/WRApMrwf8zAUw+R0gvP/l1lh87dx0BvwdDO2Eex68tXlyy
+NDVaQIcPTWIh2HDMBQqEoINbzrLrl+RqJ3IY7APDIzCj9DQ3/T6jkIFnye7GLYem
++ucqdw0UH8EEda//t94MeVL+G3HARH1t5JTr4KSyIEGwF+6cwYMlIE594IbZrF1l
+vyD4WoT0ebrSYYSbGhlMkt5ZJGT91MFAn4C5Vt+9wQKBgQD094SfcDmTcpN640Xb
+lQxAhkHiAwoJVSf2NWU3o2Jh9a+wAgZnWa/ExRzcz23FPi5cuRi5dC9rRsTkn2Pi
+DNHm71kzT+Ayy53SKzlhpy4OVrFUv0yCovweJcUFl5aNMwQM9QW7kZ3o6p3X0PAO
+vlufFdImIILXh0p+we3LATiO4QKBgQDUOsIPrE/0xL91Y37uh78573EkNR7HoLPe
+ylaQQkUjwA/zU+qPSorkWbeYT9V3SXSXjdQB0GoaCc7o3IQ6Qycv7rafwvhheXRf
+iQrGWnzu8KrX2Qs91CCiTitut0ojwaWg6LJd6cNU5uyKDmLBk1ewyyj8/G58149S
+ypLzStt6qwKBgENogRQmm5FZa//a3nRIFXEEAMkisPZUeoSjuNCQoxf1tXyncf+q
+jFWHMCQK6FfofnWBca5GrG3GsZN/0Mp5YKE9p7wY9MbFhQ46WrXmfSPw8ycw5Orl
+0p6xIgAh8Im2Sh0Op58vuNZJCVpD4msMMsYaCRP4ykhosDUlRDwif9/BAoGBAIad
+EnRghoKpvQsXZe6kWc3Eq44cx34114FL9CYicrpfW36qzo++52Q55/MLSEoWZIaw
+pjXUXUhQylX3cUOHTrbfgiTJxUQGhqMGSLhRswbXznWErNW11FE+pdvfFH6jmIv8
+rQ5WdNhIdOaIg2lnLOrtofz9nJNBIx6PcTAyXg5rAoGAG1MjzmYd705B9GNwulMp
+sHAZAFDVDbkuDkBS0Jzv7vLSOarD4hal8aOAoLU/0Ya9f+bMwyt2c6bglqY2HoTY
+7hXD6xxYt4ocka6phiuok97TMNzRAzIWd3TNotOtyDP8anXjXLzZ8dkHHIvdaUxE
+9POtRW3Pjf+k4zKsP7khbJ8=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/badSAN.pem.digest.sha1 b/jstests/libs/badSAN.pem.digest.sha1
index 98251b6edc657..ddd1a4df3fcd4 100644
--- a/jstests/libs/badSAN.pem.digest.sha1
+++ b/jstests/libs/badSAN.pem.digest.sha1
@@ -1 +1 @@
-ABC6CD0E2DBFF1D8CCF4F67BE7EA74049CA4219A
\ No newline at end of file
+E0A5C1F7BB4842CAB8B6FD12B9236C469514975F
\ No newline at end of file
diff --git a/jstests/libs/badSAN.pem.digest.sha256 b/jstests/libs/badSAN.pem.digest.sha256
index d2b1d0bfa9c90..b040870b002e0 100644
--- a/jstests/libs/badSAN.pem.digest.sha256
+++ b/jstests/libs/badSAN.pem.digest.sha256
@@ -1 +1 @@
-16C50CECD049C952B5BA9E2CDFCFCD9D999E7D5344BA85501BCEA05CCEE5529B
\ No newline at end of file
+4EF9CCD46DCBF7B038EC3CA9560A43E5B2EE24D3FB0B6612C031FF4265927AB8
\ No newline at end of file
diff --git a/jstests/libs/bulk_write_utils.js b/jstests/libs/bulk_write_utils.js
new file mode 100644
index 0000000000000..09109da99e330
--- /dev/null
+++ b/jstests/libs/bulk_write_utils.js
@@ -0,0 +1,11 @@
+
+/**
+ * Helper function to check a BulkWrite cursorEntry.
+ */
+const cursorEntryValidator = function(entry, expectedEntry) {
+ assert.eq(entry.ok, expectedEntry.ok);
+ assert.eq(entry.idx, expectedEntry.idx);
+ assert.eq(entry.n, expectedEntry.n);
+ assert.eq(entry.nModified, expectedEntry.nModified);
+ assert.eq(entry.code, expectedEntry.code);
+};
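For context, the validator simply field-compares a cursor entry against an expected template. A minimal sketch, with a hand-built entry standing in for one pulled from a real bulkWrite response cursor:

```js
// Hand-built entry for illustration; real entries come from the bulkWrite response cursor.
const entry = {ok: 1, idx: 0, n: 1, nModified: 0};
cursorEntryValidator(entry, {ok: 1, idx: 0, n: 1, nModified: 0});
// Any mismatch in ok/idx/n/nModified/code trips the corresponding assert.eq above.
```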
diff --git a/jstests/libs/ca-2019.pem b/jstests/libs/ca-2019.pem
deleted file mode 100644
index d1a5689cf0f36..0000000000000
--- a/jstests/libs/ca-2019.pem
+++ /dev/null
@@ -1,48 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
-ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
-FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
-BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
-Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
-b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
-MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
-qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
-shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
-zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
-Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
-SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
-WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
-BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
-8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
-b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
-8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
-vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
-nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
-vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
-3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
-wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
-5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
-xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
-wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
-CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
-Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
-2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
-/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
-ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
-KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
-6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
-voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
-JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
-WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
-GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
-NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
-d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
-M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
-VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
-psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
-EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
-X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
------END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/jstests/libs/ca.pem b/jstests/libs/ca.pem
index dfc69f542030f..4638aa633f480 100644
--- a/jstests/libs/ca.pem
+++ b/jstests/libs/ca.pem
@@ -3,73 +3,51 @@
#
# Primary Root Certificate Authority Most Certificates are issued by this CA.
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf
-vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T
-pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu
-Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq
-BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4
-vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr
-qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71
-2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc
-iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW
-cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO
-Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX
-JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb
+k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh
+iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n
+NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX
+qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX
+5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD
+TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9
+Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi
+0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv
+xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL
+7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+
+gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDfvZIt82obTHnc
-3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24TpItMW1N+zOaL
-HU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPuEt2rFgGg3atR
-3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYqBLGt00Wws4bp
-ILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4vBdU0Kdt9FbT
-DEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRrqmctCX4KQtOZ
-8dV3JQkNAgMBAAECggEBAImudWrzdGMSH5eL1hrc7xdO8QZqtn0KOSEiW5luWVkV
-ATBOrCpPA+LQ5CleUsc0/w71XYcCWBIacvA+e4lsBiA/dfP1rcjNrgNAvN8JJAs6
-GQerYIpysUcwvRZBhdFykMRLNb9XTm84mXlKBQuaISZ5ticaMwT0v8xlYCCPi41S
-0gfhW9fIKA45NYmTKUhS2l5M0kNqJ6JmONOHv0RaSgn4qxAVNrkAJCbb2npEKZiP
-qIR7XL1MbE7b1lNnL3PpbA6LCQpLBhNOHXrAfquagvm209d6zx8fAcIfFXMwdIZj
-S80DG9pYyXxCm9wfdEJHrpFwf/ueoXpr9p6JhVvenkECgYEA/MwD3zQ+8dF4X/z5
-UEbfXsmZ5olqH8bKithou5zDXkwkLi7VcOWK7TkNUpJ70ex37/Jf4CdRPYarzK8Y
-QCWadiUfthCkQxZAGK6vy/MGL3ha+rzuZmpERiq275kmUK+qQ79rKh10j7b08PYE
-Q9XZqDsWh3AyniZcjuf0KDH2J3ECgYEA4pNOmG0s7Xx6yJgLE4EKIIhHswnF01Tw
-6v3HlQRymecl8D8DuX974dB9ihlM1scNPRcqz9kr0ZRn0gRUirI+VBMuilp8SGxF
-CyUNoP9vnhYPJqeOIIv61XGno02HNzLjOC1oTFndYgAx5PH/wbbjqPCq64zVXpke
-pi5zfhJrhV0CgYB3X1NYvBAZGoQQN6H3EXB7DxHYdf0iAjo0dFqIdU8gIS5YjI8H
-n60Jtg4fdsJ3b4V3TemOLKLFPem0Xt5BtEALGB6wV41pjIE55otm8Fx0YA3+Jucg
-f3+77oGyBIy+PyVUlnhhS8V187wYaOO8sKf2M+jAje+pFI5SgR+fN3B4UQKBgQDL
-BJlBJVpDde2C/hG2qUJMwjAUseYLpam1ti32TaTuWFr7OUt6FEwJm8pRSrg6HuOy
-4KZg1URZJ3Qbj/u8x1fn98QU/l0se68l/E6I+zilWzi6Nxq3+RWJ2awPPHGzOSq1
-9KHnAALJRElwzpKPnfPCNdvA8lFh0SvQVgDwb0relQKBgGCGh51jqQieVVowiYc5
-/1mPI4I3MLg4k1v/iAUQzINEX7xkCG+xx7tz5LzUM//nNeI/UiO/rVG6/dnLNhUP
-ixVZG7LiRihtrPkUr+ecULCX+6RNDAi88X5z5EA8DSzZY41jpKrneowy2Wy16ZOq
-LKaGtbH/tHj8UT/l8OYicgI1
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCbk/WPIqqv46Nv
+9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZShiO2o9iDC5O1A
+edop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4nNY00x9PkCcoq
+98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJXqaeRNXS0/j8W
+wp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX5aJoBUDL22fL
+Rhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHDTFGBx0p17I1g
+0xjWNjMVAgMBAAECggEAa1GtWod3Zs2IuHm83h/feQPAggj4qksWB9EUJ1fBxnhN
+zlF5clPUDu7m8KBMmPVBKgzipGLkRozUfF2R+Ldc3mjbDVaDE9K83aV66kU/7hT3
+5tCXxmNlVfADabpDiFCygCmYtogBypRWOT2hEtxrGVhdSJzDJNxSaPyEzAVWOW7P
+w1Rf77yH7nMqxNUbJ/oGlEHOrWu/eBIKJnRK0P0yCkDFN3K4zK7TNg+HEQTbkCrk
+NmQQLtmIcKPc+hC/MLxlw0PhlBJkS0ukPpIXGsRGdNBAzvyOQSZhrt4bz8mQvOzw
+Ev2nJwKrRsCY+O97w3M95Le6z7ihpf2YnpJ80DRHIQKBgQDJMRmmArB04aMbAQwD
+wFcxTFim1reE9+8DU3IbXXyg+h70CzuZ1DticZy4gB0bUamx4L18FQE6Cto6KVvd
+DsaHo9e3phDlwvEkpN6WDDrp03vqkRTJiiijk0w2jB1LdQ1QRXGToCPOEvgdj+iv
+artNlQnUZfBlWFbZAhDF5V9+gwKBgQDF9cylDEJsPfPgOalAiuw501JBMnuB/wKg
+gtvmRmE9wTU2DBusLdlg6bBnhYZBY2qd419RSZPchuPyeqFcLAk2DGrZRXz7x0X0
+oS50CtAGgoSYiIObLr+vzRioiE66yTOfESUZW9zqLPBwC0WcPyE/BmhdFWMYlgvb
+/E0Ex7LUhwKBgCu7DW0lJn+xT8ed2aOzGgCwLJDPGQLA9WXOrH3AO0euNi4rht/v
+3pyOP1dnGWyHuo/cXrNqyWJ5W2fK95m8DXEMLuZPJVsbnViusWcB74hFnKimslMA
+QccUTDuLBw8QuT0aaw6Af2fafa3HSvdeBqpdW86b/b25jt8KcOTi73fLAoGBAIYU
+rzCXNiIkcBtOUtIRhmDAPjVEoRzygXAAUjsNLm3qgEUEPHYJc/PNNJtZeA6v7JXW
+XEgtgsXaY2hoIQTSGscN8A0LoNTUKxC/Xzxf2nieTHsX87PXHSRQ0UPNVy3ye1Fh
+BnS/oMjH+W6aY+Kpa7ZJe8SYaM9NSekNYYk8TWbfAoGBAMHSFnGo9V6BG1qh9xRl
+9S+QpROy4xevFlIN6KGzPNcCjjzOf7WdePss2BZdHXRzxoKANI9qHX/5hNP8YwrU
+4ofU8ObQ7YINk49we/VreTo+2VWKr7qtVHoi1rBUt57WcwhtZvdZ3hDLABQPHSfn
+TVHLzAuMRhYfHg1uhdTa3HCB
-----END PRIVATE KEY-----
-# Certificate from jstests/libs/ca-2019.pem
------BEGIN CERTIFICATE-----
-MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
-ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
-FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
-BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
-Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
-b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
-MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
-qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
-shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
-zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
-Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
-SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
-WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
-BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
-8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
-b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
-8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
-vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
-nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
------END CERTIFICATE-----
diff --git a/jstests/libs/ca.pem.digest.sha1 b/jstests/libs/ca.pem.digest.sha1
index dbe9e3898afc7..e1ec750dc4655 100644
--- a/jstests/libs/ca.pem.digest.sha1
+++ b/jstests/libs/ca.pem.digest.sha1
@@ -1 +1 @@
-F42B9419C2EF9D431D7C0E5061A82902D385203A
\ No newline at end of file
+D33E7C8B0748C66DBEEE6E24410FA72A47607DF3
\ No newline at end of file
diff --git a/jstests/libs/ca.pem.digest.sha256 b/jstests/libs/ca.pem.digest.sha256
index 2cffe1b5da960..4ac5afdd90414 100644
--- a/jstests/libs/ca.pem.digest.sha256
+++ b/jstests/libs/ca.pem.digest.sha256
@@ -1 +1 @@
-21A1C6A87B31AF590F5074EE716F193522B8F540081A5D571B25AE5DF72863E3
\ No newline at end of file
+6568E01751761F5EC6A07B050857C77DD2D2604CD05A70A62F7DDA14829C1077
\ No newline at end of file
diff --git a/jstests/libs/catalog_shard_util.js b/jstests/libs/catalog_shard_util.js
deleted file mode 100644
index b6bff02127c9c..0000000000000
--- a/jstests/libs/catalog_shard_util.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Utilities for testing config server catalog shard behaviors.
- */
-var CatalogShardUtil = (function() {
- load("jstests/libs/feature_flag_util.js");
-
- function isEnabledIgnoringFCV(st) {
- return FeatureFlagUtil.isEnabled(
- st.configRS.getPrimary(), "CatalogShard", undefined /* user */, true /* ignoreFCV */);
- }
-
- function isTransitionEnabledIgnoringFCV(st) {
- return FeatureFlagUtil.isEnabled(st.configRS.getPrimary(),
- "TransitionToCatalogShard",
- undefined /* user */,
- true /* ignoreFCV */);
- }
-
- function transitionToDedicatedConfigServer(st, timeout) {
- if (timeout == undefined) {
- timeout = 10 * 60 * 1000; // 10 minutes
- }
-
- assert.soon(function() {
- const res = st.s.adminCommand({transitionToDedicatedConfigServer: 1});
- if (!res.ok && res.code === ErrorCodes.ShardNotFound) {
- // If the config server primary steps down right after removing the config.shards
- // doc for the shard but before responding with "state": "completed", the mongos
- // would retry the _configsvrTransitionToDedicatedConfigServer command against the
- // new config server primary, which would not find the removed shard in its
- // ShardRegistry if it has done a ShardRegistry reload after the config.shards doc
- // for the shard was removed. This would cause the command to fail with
- // ShardNotFound.
- return true;
- }
- assert.commandWorked(res);
- return res.state == 'completed';
- }, "failed to transition to dedicated config server within " + timeout + "ms", timeout);
- }
-
- function waitForRangeDeletions(conn) {
- assert.soon(() => {
- const rangeDeletions = conn.getCollection("config.rangeDeletions").find().toArray();
- if (rangeDeletions.length) {
- print("Waiting for range deletions to complete: " + tojsononeline(rangeDeletions));
- sleep(100);
- return false;
- }
- return true;
- });
- }
-
- return {
- isEnabledIgnoringFCV,
- isTransitionEnabledIgnoringFCV,
- transitionToDedicatedConfigServer,
- waitForRangeDeletions,
- };
-})();
diff --git a/jstests/libs/ce_stats_utils.js b/jstests/libs/ce_stats_utils.js
index fa74012018274..daa5461c59e52 100644
--- a/jstests/libs/ce_stats_utils.js
+++ b/jstests/libs/ce_stats_utils.js
@@ -1,10 +1,16 @@
load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+
+import {
+ checkCascadesFeatureFlagEnabled,
+ extractLogicalCEFromNode,
+ getPlanSkeleton,
+ navigateToRootNode,
+} from "jstests/libs/optimizer_utils.js";
/**
* Returns a simplified skeleton of the physical plan including intervals & logical CE.
*/
-function summarizeExplainForCE(explain) {
+export function summarizeExplainForCE(explain) {
const node = getPlanSkeleton(navigateToRootNode(explain), {
extraKeepKeys: ["interval", "properties"],
printLogicalCE: true,
@@ -15,7 +21,7 @@ function summarizeExplainForCE(explain) {
/**
* Extracts the cardinality estimate of the explain root node.
*/
-function getRootCE(explain) {
+export function getRootCE(explain) {
const rootNode = navigateToRootNode(explain);
assert.neq(rootNode, null, tojson(explain));
assert.eq(rootNode.nodeType, "Root", tojson(rootNode));
@@ -25,7 +31,7 @@ function getRootCE(explain) {
/**
* Asserts that expected and actual are equal, within a small tolerance.
*/
-function assertApproxEq(expected, actual, msg, tolerance = 0.01) {
+export function assertApproxEq(expected, actual, msg, tolerance = 0.01) {
assert(Math.abs(expected - actual) < tolerance, msg);
}
@@ -34,7 +40,7 @@ function assertApproxEq(expected, actual, msg, tolerance = 0.01) {
* if the ce parameter is omitted, we expect our estimate to exactly match what the query actually
* returns.
*/
-function verifyCEForMatch({coll, predicate, expected, ce, hint}) {
+export function verifyCEForMatch({coll, predicate, expected, ce, hint}) {
jsTestLog(`Verify CE for match ${tojson(predicate)}`);
const CEs = ce ? [ce] : undefined;
return verifyCEForMatchNodes(
@@ -48,7 +54,7 @@ function verifyCEForMatch({coll, predicate, expected, ce, hint}) {
* expected estimates should be defined in CEs, or it defaults to the number of documents expected
* to be returned by the query.
*/
-function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint}) {
+export function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint}) {
// Run aggregation & verify query results.
const options = hint ? {hint} : {};
const actual = coll.aggregate([{$match: predicate}], options).toArray();
@@ -76,11 +82,11 @@ function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint
/**
* Creates a histogram for the given 'coll' along the input field 'key'.
*/
-function createHistogram(coll, key, options = {}) {
+export function createHistogram(coll, key, options = {}) {
// We can't use forceBonsai here because the new optimizer doesn't know how to handle the
// analyze command.
- assert.commandWorked(
- coll.getDB().adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ assert.commandWorked(coll.getDB().adminCommand(
+ {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
// Set up histogram for test collection.
const res = coll.getDB().runCommand(Object.assign({analyze: coll.getName(), key}, options));
@@ -90,7 +96,7 @@ function createHistogram(coll, key, options = {}) {
/**
* Validates that the generated histogram for the given "coll" has the expected type counters.
*/
-function createAndValidateHistogram({coll, expectedHistogram, empty = false, options = {}}) {
+export function createAndValidateHistogram({coll, expectedHistogram, empty = false, options = {}}) {
const field = expectedHistogram._id;
createHistogram(coll, field, options);
@@ -107,8 +113,8 @@ function createAndValidateHistogram({coll, expectedHistogram, empty = false, opt
* estimation. This ensures that the appropriate flags/query knobs are set and ensures the state of
* relevant flags is restored after the test.
*/
-function runHistogramsTest(test) {
- if (!checkCascadesOptimizerEnabled(db)) {
+export function runHistogramsTest(test) {
+ if (!checkCascadesFeatureFlagEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
return;
}
@@ -140,7 +146,7 @@ function runHistogramsTest(test) {
/**
* Creates a single-field index for each field in the 'fields' array.
*/
-function createIndexes(coll, fields) {
+export function createIndexes(coll, fields) {
for (const field of fields) {
assert.commandWorked(coll.createIndex({[field]: 1}));
}
@@ -149,17 +155,18 @@ function createIndexes(coll, fields) {
/**
* Creates statistics for each field in the 'fields' array.
*/
-function analyzeFields(db, coll, fields, bucketCnt = 100) {
+export function analyzeFields(db, coll, fields, bucketCnt = 100) {
for (const field of fields) {
assert.commandWorked(
db.runCommand({analyze: coll.getName(), key: field, numberBuckets: bucketCnt}));
}
}
+
/**
* Given a scalar histogram document print it combining bounds with the corresponding buckets.
* hist = { buckets: [{boundaryCount: 1, rangeCount: 0, ...}], bounds: [100, 500]}
*/
-function printScalarHistogram(hist) {
+export function printScalarHistogram(hist) {
assert.eq(hist.buckets.length, hist.bounds.length);
let i = 0;
while (i < hist.buckets.length) {
@@ -168,7 +175,7 @@ function printScalarHistogram(hist) {
}
}
-function printHistogram(hist) {
+export function printHistogram(hist) {
jsTestLog(`Histogram on field: ${hist._id}`);
print("Scalar Histogram:\n");
printScalarHistogram(hist.statistics.scalarHistogram);
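Since the helpers are now ES module exports, a consuming test imports them instead of load()-ing the file. A minimal sketch, assuming a scratch collection; the collection name, field name, and bucket count are illustrative:

```js
import {
    analyzeFields,
    createIndexes,
    runHistogramsTest,
} from "jstests/libs/ce_stats_utils.js";

runHistogramsTest(function() {
    const coll = db.ce_example;  // illustrative collection/field names
    coll.drop();
    assert.commandWorked(coll.insert([{a: 1}, {a: 2}, {a: 2}]));
    createIndexes(coll, ["a"]);
    analyzeFields(db, coll, ["a"], 10);  // build a 10-bucket histogram on 'a'
});
```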
diff --git a/jstests/libs/change_stream_rewrite_util.js b/jstests/libs/change_stream_rewrite_util.js
index a5b678ef3de07..574b848a1bfa2 100644
--- a/jstests/libs/change_stream_rewrite_util.js
+++ b/jstests/libs/change_stream_rewrite_util.js
@@ -7,7 +7,8 @@ load("jstests/libs/fixture_helpers.js"); // For isMongos.
// Function which generates a write workload on the specified collection, including all events that
// a change stream may consume. Assumes that the specified collection does not already exist.
-function generateChangeStreamWriteWorkload(db, collName, numDocs, includInvalidatingEvents = true) {
+export function generateChangeStreamWriteWorkload(
+ db, collName, numDocs, includInvalidatingEvents = true) {
// If this is a sharded passthrough, make sure we shard on something other than _id so that a
// non-id field appears in the documentKey. This will generate 'create' and 'shardCollection'.
if (FixtureHelpers.isMongos(db)) {
@@ -99,7 +100,8 @@ function generateChangeStreamWriteWorkload(db, collName, numDocs, includInvalida
// Helper function to fully exhaust a change stream from the specified point and return all events.
// Assumes that all relevant events can fit into a single 16MB batch.
-function getAllChangeStreamEvents(db, extraPipelineStages = [], csOptions = {}, resumeToken) {
+export function getAllChangeStreamEvents(
+ db, extraPipelineStages = [], csOptions = {}, resumeToken) {
// Open a whole-cluster stream based on the supplied arguments.
const csCursor = db.getMongo().watch(
extraPipelineStages,
@@ -121,12 +123,13 @@ function getAllChangeStreamEvents(db, extraPipelineStages = [], csOptions = {},
}
// Helper function to check whether this value is a plain old javascript object.
-function isPlainObject(value) {
+export function isPlainObject(value) {
return (value && typeof (value) == "object" && value.constructor === Object);
}
// Verifies the number of change streams events returned from a particular shard.
-function assertNumChangeStreamDocsReturnedFromShard(stats, shardName, expectedTotalReturned) {
+export function assertNumChangeStreamDocsReturnedFromShard(
+ stats, shardName, expectedTotalReturned) {
assert(stats.shards.hasOwnProperty(shardName), stats);
const stages = stats.shards[shardName].stages;
const lastStage = stages[stages.length - 1];
@@ -134,7 +137,7 @@ function assertNumChangeStreamDocsReturnedFromShard(stats, shardName, expectedTo
}
// Verifies the number of oplog events read by a particular shard.
-function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalReturned) {
+export function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalReturned) {
assert(stats.shards.hasOwnProperty(shardName), stats);
assert.eq(Object.keys(stats.shards[shardName].stages[0])[0], "$cursor", stats);
const executionStats = stats.shards[shardName].stages[0].$cursor.executionStats;
@@ -145,7 +148,7 @@ function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalRet
}
// Returns a newly created sharded collection sharded by caller provided shard key.
-function createShardedCollection(shardingTest, shardKey, dbName, collName, splitAt) {
+export function createShardedCollection(shardingTest, shardKey, dbName, collName, splitAt) {
const db = shardingTest.s.getDB(dbName);
assertDropAndRecreateCollection(db, collName);
@@ -173,7 +176,7 @@ function createShardedCollection(shardingTest, shardKey, dbName, collName, split
// 2. There are no additional events being returned other than the ones in the 'expectedResult'.
// 3. the filtering is been done at oplog level, and each of the shard read only the
// 'expectedOplogNReturnedPerShard' documents.
-function verifyChangeStreamOnWholeCluster({
+export function verifyChangeStreamOnWholeCluster({
st,
changeStreamSpec,
userMatchExpr,
@@ -235,13 +238,15 @@ function verifyChangeStreamOnWholeCluster({
verbosity: "executionStats"
});
- assertNumMatchingOplogEventsForShard(stats, st.rs0.name, expectedOplogNReturnedPerShard[0]);
- assertNumMatchingOplogEventsForShard(stats, st.rs1.name, expectedOplogNReturnedPerShard[1]);
+ assertNumMatchingOplogEventsForShard(
+ stats, st.shard0.shardName, expectedOplogNReturnedPerShard[0]);
+ assertNumMatchingOplogEventsForShard(
+ stats, st.shard1.shardName, expectedOplogNReturnedPerShard[1]);
if (expectedChangeStreamDocsReturnedPerShard !== undefined) {
assertNumChangeStreamDocsReturnedFromShard(
- stats, st.rs0.name, expectedChangeStreamDocsReturnedPerShard[0]);
+ stats, st.shard0.shardName, expectedChangeStreamDocsReturnedPerShard[0]);
assertNumChangeStreamDocsReturnedFromShard(
- stats, st.rs1.name, expectedChangeStreamDocsReturnedPerShard[1]);
+ stats, st.shard1.shardName, expectedChangeStreamDocsReturnedPerShard[1]);
}
}
diff --git a/jstests/libs/change_stream_util.js b/jstests/libs/change_stream_util.js
index 20af14f28d38c..037a8227b4054 100644
--- a/jstests/libs/change_stream_util.js
+++ b/jstests/libs/change_stream_util.js
@@ -370,7 +370,7 @@ function ChangeStreamTest(_db, name = "ChangeStreamTest") {
* If the current batch has a document in it, that one will be ignored.
*/
self.getOneChange = function(cursor, expectInvalidate = false) {
- changes = self.getNextChanges(cursor, 1, true);
+ const changes = self.getNextChanges(cursor, 1, true);
if (expectInvalidate) {
assert(isInvalidated(changes[changes.length - 1]),
diff --git a/jstests/libs/check_metadata_consistency_helpers.js b/jstests/libs/check_metadata_consistency_helpers.js
index df619b3077c5b..303a6a18cd82c 100644
--- a/jstests/libs/check_metadata_consistency_helpers.js
+++ b/jstests/libs/check_metadata_consistency_helpers.js
@@ -1,8 +1,6 @@
-'use strict';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-load('jstests/libs/feature_flag_util.js'); // For FeatureFlagUtil.
-
-var MetadataConsistencyChecker = (function() {
+export var MetadataConsistencyChecker = (function() {
const run = (mongos) => {
const adminDB = mongos.getDB('admin');
@@ -17,6 +15,31 @@ var MetadataConsistencyChecker = (function() {
return;
}
+ // The isTransientError() function decides whether an error is transient; when it returns true,
+ // the metadata consistency check is aborted so that it can be retried in the future.
+ const isTransientError = function(e) {
+ if (ErrorCodes.isRetriableError(e.code) || ErrorCodes.isInterruption(e.code)) {
+ return true;
+ }
+
+ // TODO SERVER-78117: Remove once checkMetadataConsistency command is robust to
+ // ShardNotFound
+ if (e.code === ErrorCodes.ShardNotFound) {
+ // Metadata consistency check can fail with ShardNotFound if the router's
+ // ShardRegistry reloads after choosing which shards to target and a chosen
+ // shard is no longer in the cluster.
+ return true;
+ }
+
+ if (e.code === ErrorCodes.FailedToSatisfyReadPreference) {
+ // Metadata consistency check can fail with FailedToSatisfyReadPreference error
+ // response when the primary of the shard is permanently down.
+ return true;
+ }
+
+ return false;
+ };
+
const checkMetadataConsistency = function() {
jsTest.log('Started metadata consistency check');
@@ -40,7 +63,7 @@ var MetadataConsistencyChecker = (function() {
try {
checkMetadataConsistency();
} catch (e) {
- if (ErrorCodes.isRetriableError(e.code) || ErrorCodes.isInterruption(e.code)) {
+ if (isTransientError(e)) {
jsTest.log(`Aborted metadata consistency check due to retriable error: ${e}`);
} else {
throw e;
diff --git a/jstests/libs/check_orphans_are_deleted_helpers.js b/jstests/libs/check_orphans_are_deleted_helpers.js
index 6233872842ce4..0cee0c5c9d6c1 100644
--- a/jstests/libs/check_orphans_are_deleted_helpers.js
+++ b/jstests/libs/check_orphans_are_deleted_helpers.js
@@ -46,14 +46,12 @@ var CheckOrphansAreDeletedHelpers = (function() {
adminDB
.aggregate([
{$currentOp: {idleCursors: true, allUsers: true}},
- {$match: {type: 'idleCursor', ns: ns}}
+ {$match: {type: 'idleCursor'}}
])
.toArray();
- print("Idle cursors on " + ns + " @ " + shardId + ": " +
- tojson(idleCursors));
+ print("Idle cursors on shard " + shardId + ": " + tojson(idleCursors));
} catch (e) {
- print("Failed to get idle cursors for " + ns + " @ " + shardId + ": " +
- tojson(e));
+ print("Failed to get idle cursors on shard " + shardId + ": " + tojson(e));
}
return 'timed out waiting for rangeDeletions on ' + ns + ' to be empty @ ' +
diff --git a/jstests/libs/check_routing_table_consistency_helpers.js b/jstests/libs/check_routing_table_consistency_helpers.js
index 14bea76173e8d..071c6dc3f24f3 100644
--- a/jstests/libs/check_routing_table_consistency_helpers.js
+++ b/jstests/libs/check_routing_table_consistency_helpers.js
@@ -1,6 +1,6 @@
-'use strict';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-var RoutingTableConsistencyChecker = (function() {
+export var RoutingTableConsistencyChecker = (function() {
const sameObjectFields = (lhsObjFields, rhsObjFields) => {
if (lhsObjFields.length !== rhsObjFields.length) {
return false;
@@ -81,6 +81,135 @@ var RoutingTableConsistencyChecker = (function() {
return true;
};
+ /**
+ * Reproduces the logic implemented in ShardingCatalogManager::initializePlacementHistory()
+ * to compute the placement of each existing collection and database by reading the content of
+ * - config.collections + config.chunks
+ * - config.databases.
+ * The output format follows the same schema as config.placementHistory; results are ordered by
+ * namespace.
+ **/
+ const buildCurrentPlacementData = (mongos) => {
+ const pipeline = [
+ {
+ $lookup: {
+ from: "chunks",
+ localField: "uuid",
+ foreignField: "uuid",
+ as: "timestampByShard",
+ pipeline: [
+ {
+ $group: {
+ _id: "$shard",
+ value: {
+ $max: "$onCurrentShardSince"
+ }
+ }
+ }
+ ],
+ }
+ },
+ {
+ $project: {
+ _id: 0,
+ nss: "$_id",
+ shards: "$timestampByShard._id",
+ uuid: 1,
+ timestamp: {
+ $max: "$timestampByShard.value"
+ },
+ }
+ },
+ {
+ $unionWith: {
+ coll: "databases",
+ pipeline: [
+ {
+ $project: {
+ _id: 0,
+ nss: "$_id",
+ shards: [
+ "$primary"
+ ],
+ timestamp: "$version.timestamp"
+ }
+ }
+ ]
+ }
+ },
+ {
+ $sort: {
+ nss: 1
+ }
+ }
+ ];
+
+ return mongos.getDB('config').collections.aggregate(pipeline);
+ };
+
+ /**
+ * Extracts from config.placementHistory the most recent document for each collection
+ * and database. Results are ordered by namespace.
+ */
+ const getHistoricalPlacementData = (mongos, atClusterTime) => {
+ const kConfigPlacementHistoryInitializationMarker = '';
+ const pipeline = [
+ {
+ $match: {
+ // Skip documents containing initialization metadata
+ nss: {$ne: kConfigPlacementHistoryInitializationMarker},
+ timestamp: {$lte: atClusterTime}
+ }
+ },
+ {
+ $group: {
+ _id: "$nss",
+ placement: {$top: {output: "$$CURRENT", sortBy: {"timestamp": -1}}}
+ }
+ },
+ // Disregard placement entries on dropped namespaces
+ {$match: {"placement.shards": {$not: {$size: 0}}}},
+ {$replaceRoot: {newRoot: "$placement"}},
+ {$sort: {nss: 1}}
+ ];
+ return mongos.getDB('config').placementHistory.aggregate(pipeline);
+ };
+
+ const checkHistoricalPlacementMetadataConsistency = (mongos) => {
+ const placementDataFromRoutingTable = buildCurrentPlacementData(mongos);
+ const now = mongos.getDB('admin').runCommand({isMaster: 1}).operationTime;
+ const historicalPlacementData = getHistoricalPlacementData(mongos, now);
+
+ placementDataFromRoutingTable.forEach(function(nssPlacementFromRoutingTable) {
+ assert(historicalPlacementData.hasNext(),
+ `Historical placement data on ${nssPlacementFromRoutingTable.nss} is missing`);
+ const historicalNssPlacement = historicalPlacementData.next();
+ assert.eq(nssPlacementFromRoutingTable.nss,
+ historicalNssPlacement.nss,
+ 'Historical placement data does not contain the expected number of entries');
+ assert.sameMembers(nssPlacementFromRoutingTable.shards,
+ historicalNssPlacement.shards,
+ `Inconsistent placement info detected: routing table ${
+ tojson(nssPlacementFromRoutingTable)} VS placement history ${
+ tojson(historicalNssPlacement)}`);
+
+ assert.eq(nssPlacementFromRoutingTable.uuid,
+ historicalNssPlacement.uuid,
+ `Inconsistent placement info detected: routing table ${
+ tojson(nssPlacementFromRoutingTable)} VS placement history ${
+ tojson(historicalNssPlacement)}`);
+ // Timestamps are not compared, since they are expected to diverge if a chunk
+ // migration, a collection rename or a movePrimary request has been executed during
+ // the test.
+ });
+
+ if (historicalPlacementData.hasNext()) {
+ assert(false,
+ `Unexpected historical placement entries: ${
+ tojson(historicalPlacementData.toArray())}`);
+ }
+ };
+
const run = (mongos) => {
try {
jsTest.log('Checking routing table consistency');
@@ -101,6 +230,7 @@ var RoutingTableConsistencyChecker = (function() {
`Corrupted routing table detected for ${collData._id}! Details: ${
tojson(collData)}`);
});
+ jsTest.log('Routing table consistency check completed');
} catch (e) {
if (e.code !== ErrorCodes.Unauthorized) {
throw e;
@@ -108,7 +238,19 @@ var RoutingTableConsistencyChecker = (function() {
jsTest.log(
'Skipping check of routing table consistency - access to admin collections is not authorized');
}
- jsTest.log('Routing table consistency check completed');
+
+ try {
+ jsTest.log('Checking consistency of config.placementHistory against the routing table');
+ checkHistoricalPlacementMetadataConsistency(mongos);
+ jsTest.log('config.placementHistory consistency check completed');
+
+ } catch (e) {
+ if (e.code !== ErrorCodes.Unauthorized) {
+ throw e;
+ }
+ jsTest.log(
+ 'Skipping consistency check of config.placementHistory - access to admin collections is not authorized');
+ }
};
return {
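For ad-hoc debugging, the historical-placement lookup above can be run directly against config.placementHistory. The sketch below assumes a `mongos` connection in scope and mirrors the pipeline from getHistoricalPlacementData():

```js
// Latest placement entry per namespace, as of the current operationTime.
const atClusterTime = mongos.getDB('admin').runCommand({isMaster: 1}).operationTime;
const latestPlacement = mongos.getDB('config').placementHistory.aggregate([
    // '' marks initialization metadata and is skipped, as in the checker above.
    {$match: {nss: {$ne: ''}, timestamp: {$lte: atClusterTime}}},
    {$group: {_id: "$nss", placement: {$top: {output: "$$CURRENT", sortBy: {timestamp: -1}}}}},
    {$match: {"placement.shards": {$not: {$size: 0}}}},  // drop entries for dropped namespaces
    {$replaceRoot: {newRoot: "$placement"}},
    {$sort: {nss: 1}}
]).toArray();
printjson(latestPlacement);
```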
diff --git a/jstests/libs/check_shard_filtering_metadata_helpers.js b/jstests/libs/check_shard_filtering_metadata_helpers.js
index 970be5e081dfe..987fad37a3d1b 100644
--- a/jstests/libs/check_shard_filtering_metadata_helpers.js
+++ b/jstests/libs/check_shard_filtering_metadata_helpers.js
@@ -3,6 +3,10 @@
var CheckShardFilteringMetadataHelpers = (function() {
function run(mongosConn, nodeConn, shardId, skipCheckShardedCollections = false) {
function checkDatabase(configDatabasesEntry) {
+ // No shard other than the db-primary shard may believe it is the db-primary. Non
+ // db-primary shards are allowed to have a stale notion of the dbVersion, as long as
+ // they believe they are not the primary.
+
const dbName = configDatabasesEntry._id;
print(`CheckShardFilteringMetadata: checking database '${dbName}' on node '${
nodeConn.host}' of shard '${shardId}'`);
@@ -10,28 +14,59 @@ var CheckShardFilteringMetadataHelpers = (function() {
const nodeMetadata =
assert.commandWorked(nodeConn.adminCommand({getDatabaseVersion: dbName}));
+ // Skip this check if isPrimaryShardForDb is not present; it is not multiversion compatible.
+ if (nodeMetadata.dbVersion.isPrimaryShardForDb === undefined) {
+ return;
+ }
+
if (nodeMetadata.dbVersion.timestamp === undefined) {
- // Shards are allowed to not know the dbVersion.
+ // Node has no knowledge of the database.
return;
}
- assert.eq(nodeMetadata.dbVersion.uuid,
- configDatabasesEntry.version.uuid,
- `Unexpected dbVersion.uuid for db '${dbName}' on node '${nodeConn.host}'`);
- assert.eq(timestampCmp(nodeMetadata.dbVersion.timestamp,
- configDatabasesEntry.version.timestamp),
- 0,
- `Unexpected dbVersion timestamp for db '${dbName}' on node '${
- nodeConn.host}'. Found '${
- tojson(nodeMetadata.dbVersion.timestamp)}'; expected '${
- tojson(configDatabasesEntry.version.timestamp)}'`);
- assert.eq(nodeMetadata.dbVersion.lastMod,
- configDatabasesEntry.version.lastMod,
- `Unexpected dbVersion lastMod for db '${dbName}' on node '${nodeConn.host}'`);
+ assert.eq(
+ configDatabasesEntry.primary === shardId,
+ nodeMetadata.isPrimaryShardForDb,
+ `Unexpected isPrimaryShardForDb for db '${dbName}' on node '${nodeConn.host}'`);
+
+ // If the node is the primary shard for the database, it should know the correct
+ // database version.
+ if (configDatabasesEntry.primary === shardId) {
+ assert.eq(
+ nodeMetadata.dbVersion.uuid,
+ configDatabasesEntry.version.uuid,
+ `Unexpected dbVersion.uuid for db '${dbName}' on node '${nodeConn.host}'`);
+ assert.eq(timestampCmp(nodeMetadata.dbVersion.timestamp,
+ configDatabasesEntry.version.timestamp),
+ 0,
+ `Unexpected dbVersion timestamp for db '${dbName}' on node '${
+ nodeConn.host}'. Found '${
+ tojson(nodeMetadata.dbVersion.timestamp)}'; expected '${
+ tojson(configDatabasesEntry.version.timestamp)}'`);
+ assert.eq(
+ nodeMetadata.dbVersion.lastMod,
+ configDatabasesEntry.version.lastMod,
+ `Unexpected dbVersion lastMod for db '${dbName}' on node '${nodeConn.host}'`);
+ }
print(`CheckShardFilteringMetadata: Database '${dbName}' on '${nodeConn.host}' OK`);
}
+ function getPrimaryShardForDB(dbName) {
+ if (dbName == 'config') {
+ return 'config';
+ }
+
+ const configDB = mongosConn.getDB('config');
+
+ const dbEntry = configDB.databases.findOne({_id: dbName});
+ assert(dbEntry, `Couldn't find database '${dbName}' in 'config.databases'`);
+ assert(dbEntry.primary,
+ `Database entry for db '${dbName}' does not contain primary shard: ${
+ tojson(dbEntry)}`);
+ return dbEntry.primary;
+ }
+
function checkShardedCollection(coll, nodeShardingState) {
const ns = coll._id;
print(`CheckShardFilteringMetadata: checking collection '${ns} ' on node '${
@@ -39,13 +74,13 @@ var CheckShardFilteringMetadataHelpers = (function() {
const configDB = mongosConn.getDB('config');
+ const dbName = mongosConn.getCollection(ns).getDB().getName();
+ const primaryShardId = getPrimaryShardForDB(dbName);
const highestChunkOnShard = configDB.chunks.find({uuid: coll.uuid, shard: shardId})
.sort({lastmod: -1})
.limit(1)
.toArray()[0];
- const expectedShardVersion =
- highestChunkOnShard ? highestChunkOnShard.lastmod : Timestamp(0, 0);
const expectedTimestamp = coll.timestamp;
const collectionMetadataOnNode = nodeShardingState.versions[ns];
@@ -55,25 +90,29 @@ var CheckShardFilteringMetadataHelpers = (function() {
return;
}
- if (collectionMetadataOnNode.timestamp === undefined) {
- // Versions earlier than v6.3 did not report the timestamp on shardingState command
- // (SERVER-70790). This early exit can be removed after v6.0 is no longer tested in
- // multiversion suites.
- return;
- }
-
- if (timestampCmp(collectionMetadataOnNode.timestamp, Timestamp(0, 0)) === 0) {
- // The metadata reflects an unsharded collection. It is okay for a node to have this
- // stale metadata, as long as the node knows the correct dbVersion.
+ // TODO BACKPORT-15533: re-enable the following checks in multiversion suites
+ const isMultiversion = jsTest.options().shardMixedBinVersions ||
+ jsTest.options().useRandomBinVersionsWithinReplicaSet;
+ if (isMultiversion ||
+ (shardId != getPrimaryShardForDB(dbName) && !highestChunkOnShard)) {
+ // The shard is neither primary for database nor owns some chunks for this
+ // collection.
+ // In this case the shard is allow to have a stale/wrong collection
+ // metadata as long as it has the correct db version.
return;
}
- // If the node knows its filtering info, then assert that it is correct.
+ // Check that timestamp is correct
assert.eq(timestampCmp(collectionMetadataOnNode.timestamp, expectedTimestamp),
0,
`Unexpected timestamp for ns '${ns}' on node '${nodeConn.host}'. Found '${
tojson(collectionMetadataOnNode.timestamp)}', expected '${
tojson(expectedTimestamp)}'`);
+
+ // Check that placement version is correct
+ const expectedShardVersion =
+ highestChunkOnShard ? highestChunkOnShard.lastmod : Timestamp(0, 0);
+
// Only check the major version because some operations (such as resharding or
// setAllowMigrations) bump the minor version without the shards knowing. This does not
// affect placement, so it is okay.
@@ -87,10 +126,9 @@ var CheckShardFilteringMetadataHelpers = (function() {
const configDB = mongosConn.getDB('config');
// Check shards know correct database versions.
- // TODO: SERVER-73991 Reenable this check.
- // configDB.databases.find({primary: shardId}).forEach(configDatabasesEntry => {
- // checkDatabase(configDatabasesEntry);
- // });
+ configDB.databases.find().forEach(configDatabasesEntry => {
+ checkDatabase(configDatabasesEntry);
+ });
// Check that shards have correct filtering metadata for sharded collections.
if (!skipCheckShardedCollections) {
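The per-node database check introduced above reduces to the comparison sketched here; `mongosConn`, `nodeConn`, and `shardId` are assumed to be in scope (as they are in the helper), and the database name is illustrative.

```js
// Sketch of the db-primary agreement check (connections and shardId assumed in scope).
const dbName = 'test';  // illustrative
const configEntry = mongosConn.getDB('config').databases.findOne({_id: dbName});
const nodeMetadata = assert.commandWorked(nodeConn.adminCommand({getDatabaseVersion: dbName}));
if (nodeMetadata.dbVersion.isPrimaryShardForDb !== undefined &&
    nodeMetadata.dbVersion.timestamp !== undefined) {
    // Every node must agree on whether its shard is the db-primary; only the db-primary
    // shard is additionally required to match config.databases on the full dbVersion.
    assert.eq(configEntry.primary === shardId, nodeMetadata.isPrimaryShardForDb);
    if (configEntry.primary === shardId) {
        assert.eq(nodeMetadata.dbVersion.uuid, configEntry.version.uuid);
    }
}
```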
diff --git a/jstests/libs/check_unique_indexes.js b/jstests/libs/check_unique_indexes.js
index abbb9bb536ff8..38e33baeb4bd0 100644
--- a/jstests/libs/check_unique_indexes.js
+++ b/jstests/libs/check_unique_indexes.js
@@ -12,7 +12,7 @@ function checkUniqueIndexFormatVersion(adminDB) {
if (!isWiredTiger)
return;
- res = assert.commandWorked(adminDB.runCommand({"listDatabases": 1}));
+ let res = assert.commandWorked(adminDB.runCommand({"listDatabases": 1}));
let databaseList = res.databases;
databaseList.forEach(function(database) {
diff --git a/jstests/libs/chunk_manipulation_util.js b/jstests/libs/chunk_manipulation_util.js
index a4afda3e9374b..d27fd97e8eaf4 100644
--- a/jstests/libs/chunk_manipulation_util.js
+++ b/jstests/libs/chunk_manipulation_util.js
@@ -2,7 +2,7 @@
// Utilities for testing chunk manipulation: moveChunk, mergeChunks, etc.
//
-load('./jstests/libs/test_background_ops.js');
+load('jstests/libs/test_background_ops.js');
//
// Start a background moveChunk.
diff --git a/jstests/libs/client-all-the-oids.pem b/jstests/libs/client-all-the-oids.pem
index 0ae372303c80a..4f65f2d9f466a 100644
--- a/jstests/libs/client-all-the-oids.pem
+++ b/jstests/libs/client-all-the-oids.pem
@@ -3,10 +3,10 @@
#
# Client certificate with a long list of OIDs. Ensures the server functions well in unexpected circumstances.
-----BEGIN CERTIFICATE-----
-MIIG4zCCBcugAwIBAgIEUZgH9zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIG4zCCBcugAwIBAgIEZ+LS5zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCCA/ExEDAO
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM2WhcNMjUwOTEwMTQyODM2WjCCA/ExEDAO
BgNVBAMMB0RhdHVtLTMxEDAOBgNVBAQMB0RhdHVtLTQxEDAOBgNVBAUTB0RhdHVt
LTUxCzAJBgNVBAYTAlVTMRAwDgYDVQQHDAdEYXR1bS03MQswCQYDVQQIDAJOWTEQ
MA4GA1UECQwHRGF0dW0tOTERMA8GA1UECgwIRGF0dW0tMTAxETAPBgNVBAsMCERh
@@ -28,44 +28,44 @@ ETAPBgNVBC8MCERhdHVtLTQ3MREwDwYDVQQwDAhEYXR1bS00ODERMA8GA1UEMQwI
RGF0dW0tNDkxETAPBgNVBDIMCERhdHVtLTUwMREwDwYDVQQzDAhEYXR1bS01MTER
MA8GA1UENAwIRGF0dW0tNTIxETAPBgNVBDUMCERhdHVtLTUzMREwDwYDVQQ2DAhE
YXR1bS01NDERMA8GA1UEQQwIRGF0dW0tNjUxETAPBgNVBEgMCERhdHVtLTcyMIIB
-IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvuPee340G2hE/gE8HHnzqJkH
-Q4FqJv4TuzYcH1b4XyozDGAUTXaJo+/vJOcuh3WUJbSKfFRipq6JC85QZutRkfZX
-v/+VzJz95KJwwMTfg6yTx36Hn0+MlTps1tuwsvOClqRsAZ+KMqbyO3gQy3LzykOE
-BBHvYweC+qVa9oZdIO1/OqHySdHSPZKsKrJRny0f604wYFtB+zWHtO7nbNG4QCzr
-vjyox4IidDX6KPJ1f4CsjeIxSn0qtldEKADGHmEfpzGILaLHo2G1xEj0jhdahfve
-+BIpoJOTWbho0iDfAqE7P058ssM6XjX4y4dQ3tWhguRXI4pI5LsH5cubYJjA4wID
-AQABMA0GCSqGSIb3DQEBCwUAA4IBAQA4NnTRTEWrEYHQQ4AL9x6m1iT2zr3k3N67
-uwDQRiXqggr2Vtqz75N6DekU4KvsTpUFvl74HWTAZUKjjtryS/1sQIs9nSld/oEv
-iRYNoaXYTwI4Osng2LVC6uOZd5fAnqkbN3RdhbpqzwVBq/UPJgYC28mD2Wbn2axa
-wBOxR+RfJ7e53jwiTBBVHv9cO+3MqFvLeu4yMswUenN6dywL5VtkmjUWtzvrvWMr
-DL0eCmrdacYhbT/oRYRvgD6A72gI5SOBQ4sU+5t1fcYMkaRDr7woqh3/mY4LfthM
-Ya0joUdTxUqxSbPg4DjQNvawAFeI/KtCjDxjQulMtl7gs7Kqgmsb
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuPLQteDh5gvx/70joj9m21HB
+On6LdiuaOwDg9aK6lWSe0RU1uzGGmQPtLM6mArQZ/2RXwhl6WLvVJLC1PC6ZAmbp
+fOgMnWyF4nNElQTfC6jQctc+TaRMtXyq6A1Mt9z6JSwmEgGnMInTeoSINNyDWzhF
+Pez+BkPDdIYDv+qNUzM/fRflq7PoRgdABku+1XX4piJpJG3rTNJT5FmexlAZf7bc
+KUtwtcdoyBwIZWICGz26yFTry8/X0TEWuVlsIutBwTgzSIP/yEilcdZU2IZZYlhM
+Oeis2JngFD3JEIRhb1di5CHVHUBKhy8GkiZNtnjk1/aavyFvdw7rnyOOKT9tKwID
+AQABMA0GCSqGSIb3DQEBCwUAA4IBAQBqPLE1+AEIb8wwcdChqjyMTc3dSg9jSPxj
+IAWpRncsGR4OxzaJB1iDsWzyFVAJXz32L6ptjPLE/tk0DKnsvzFLttYj8jagLXMF
+OCTI6LczZDl1aPUVUFp+2BdYUPstr84glY+ptA7ZR6xlZOYnzmcAyGOo+aSeJuIp
+f0C7KeJNYsI2kO+FAJlOghwMsErQJzSF97Sb99nFm8t1lnk80q0Doo8mkuh1Dpot
+iRz/iYCODRHyh0owGeazvSS2WO/urC/AL1siTvAdJiWuejXb67dR2KGk0v4mYS6y
+7bOlbdnicaMNyz5T7/TOzCRJweYwTBlzBtdoa8OtXex5zSxnMwGb
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+4957fjQbaET+
-ATwcefOomQdDgWom/hO7NhwfVvhfKjMMYBRNdomj7+8k5y6HdZQltIp8VGKmrokL
-zlBm61GR9le//5XMnP3konDAxN+DrJPHfoefT4yVOmzW27Cy84KWpGwBn4oypvI7
-eBDLcvPKQ4QEEe9jB4L6pVr2hl0g7X86ofJJ0dI9kqwqslGfLR/rTjBgW0H7NYe0
-7uds0bhALOu+PKjHgiJ0Nfoo8nV/gKyN4jFKfSq2V0QoAMYeYR+nMYgtosejYbXE
-SPSOF1qF+974Eimgk5NZuGjSIN8CoTs/TnyywzpeNfjLh1De1aGC5Fcjikjkuwfl
-y5tgmMDjAgMBAAECggEAeHU7FBrTrufhgZgt4ppiU+YdW0/zOJ/ku6KtpGkxWnw6
-snh+11MSEE18T4FDL1/XGOQQ79wgVKaW/Mg4nHmqg708KoCSewgmf3yyQjL6CRGC
-P8Vst+9u+0xfGkaP/p0DryQas/uFLemDultN3dSwWta4HAllUsyOrrRh7mdjpT6h
-YyRqVN9eo8pLApsQ4Ahow4Ut00bdSi32eOEy1VRhLCMnNa/Z7Fb4evczN3AOH+qv
-3CINABoMG0MJcluYN3W9qutSCz7vhG20r8Nxf3zlPg1CC0eJCP2vArlmgPHDNVN4
-F8QbNUCVdnNOfvtd35Y04Ud3jHtpxO0RDBgTlc2goQKBgQDzJ7ek394psAhw9ytp
-3XFAm0zkjr/3voF9MmCKxCSS1p8WSfjYZ9pJ2GOdiY648oF42kwS9kaUTIKONeCr
-m3pNz1ndZtMbzkmMBXUKEf0Kl78igyEY+4D0gsfY1lBkzVIiypppS3M0CUKudkFL
-czYIkCRHYds//oodGC2K4CxDFQKBgQDI+VnjrZZWsxI/rWkfGjITFmAFnp7HGTOB
-Md0A4WIOdL5rDXbycW2g+/5ya56h4j4evJALjA3nkhJ77Eza1Tf0CJCyfGyclXO4
-jjjOwm7Q0O4SMxGS4eSKe94bsXs5wxJZVVrxpXzfyRj+szQp2YkJoxGE0zh95Bf4
-lnGfAxpSFwKBgDQd/DfDoBuxVm3YHJ8JTr/5SYbnre/NDnYmORklJ22twNWHL2Y7
-BEe1sMxQcp3jpKqhp5Kc5M3ehFE07R37KkDJQ8q3wmIAWjU/6jEpX+JIWjhsgMiZ
-B6/g5DLu5LZmZ8d7Q5N0D5JEtd0tDZu9awR02MHQEK1rwnCwAPr2R6ZtAoGBAKu/
-5KDdLRiEyjie8rJAycBkI8VJrDlMAScMvOjOs+TUwhJsnFVOuDiWgfm4nQWNejb8
-QEGN/CgFPkSnSXrOMpYbZ8UQ7iTW1+FFsaiIovlTQ9FL0V3sLEpo1wRlpYBUg+7S
-MflGyrPYgMLR1Oda33Db6dHQTHvRnOa1cv5IQYsbAoGBANbD8okVET/Ct6FjO7xd
-LCB/SLYQP6yZ2ASuOSCznxuCIUIpIehklTYolBvcaov36dFwJ4h2tvfOBWwUfkc3
-IDKdsxO8O6r6605EkbqPLRQwXJn4F9WXl7rgsFTRdP2Vx9KvZ/gfsNzaREuy73cQ
-RE+ZlgSlISIgYJeBScwFU9UA
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC48tC14OHmC/H/
+vSOiP2bbUcE6fot2K5o7AOD1orqVZJ7RFTW7MYaZA+0szqYCtBn/ZFfCGXpYu9Uk
+sLU8LpkCZul86AydbIXic0SVBN8LqNBy1z5NpEy1fKroDUy33PolLCYSAacwidN6
+hIg03INbOEU97P4GQ8N0hgO/6o1TMz99F+Wrs+hGB0AGS77VdfimImkkbetM0lPk
+WZ7GUBl/ttwpS3C1x2jIHAhlYgIbPbrIVOvLz9fRMRa5WWwi60HBODNIg//ISKVx
+1lTYhlliWEw56KzYmeAUPckQhGFvV2LkIdUdQEqHLwaSJk22eOTX9pq/IW93Duuf
+I44pP20rAgMBAAECggEATcGAL8cxEgeZoXxCW/ct5i00AMStG2xvdjkDWyIkMaTR
+BHRXV7EVtoUpFLp4nxBbEb19C9SsVFv722YTfA7XM2RS67mffBvhGfh+L+yRXZSG
+tpF51yipO3ADZnYY+AAGhtRN2SoqwURgzdSkcxz2eMZqjgNyNO4OYZHqR6hz7DMb
+ihA0rKhDhdSNkMPnS2gHtoT3bi/+tzGDHwxevXgbO2cka/9UzdgTyzAxzfdxJpY+
+TS0Qx9HYuA8GXveIa/4ajXdbgGAJUknMQZDHj5yBwZkjCwNSVKhYhtggFR4Kmucr
+wMz3P2TjlhbIoiR6VpjfT2qJJ6rHFRYpBw20zkJMUQKBgQDfbYoiqcYtP8LuVmw7
+2ohMgvvW8p8UmFqwXLF+rWVHJ7oVBR3cjEFiwqKH+hsHTsDHM0JpY5B8XPPnUcV1
+kW7eYpoqMZau+Qmjo7LPD7/CxjkN/9rZxwGRi+DT0QgaMQs+h+be4f+ywZhcUTOF
+1jXxciAMAvofd817uonrS48+aQKBgQDT6TO97TD4HPbreyQp5LgulJb3jPgXqssI
+lbzT/qcLlPsGA3KO8F2CDmTuV3A0Hab3nr2zKx1uHJQ9VQieobQTD4NVO7lyH+39
+/hJXjpf9V2yMjzHtRSLa7hOym/8jTjl/cSgcDXIMzcVIEK37/7XRjB6Fkm1vdVNi
+94m9g27EcwKBgBeQdgEx5rTfBLAebUviwH1RUz5YWG7Torz3CcFSiFv41Kdi4sNg
+B4VjAcFVn4Qlyi84OMjkt1hmdVLwTOMZc5gAzkGk+T97BIk9up4cCx1/yoSvd2WA
+l6nCFvRAF5qrkYQG0VKwg0dXI7qY1dZHwWi1oKujpL2s5P8nrWQjk/gBAoGAeqbD
+V+5tJW4+TFVdvRGwuOUv6+AfyDMGiNWCEYGW1dFI6VYdfDYLKTeDm5/oRYHUHgY2
+7fjKc+z+r/EDqC0XuOCFt1N2JmPsr9Hac+sIdY2gOkq3LwmQ/v5FUF+R8LFZ/jVU
+bcgqdtaylN9ylCSQ69QZ5l22EYjq1qFHi/UCq5kCgYEAiGUNKMCsYycBX/kkjkMg
+pO/hl/pDEHSL2Dz5nFjF22u/zPRT2XjKYoBAPHGFJT4cYLmrJaV7W/mLUBSKwkI5
+74cEdgJwUmGanM+gu3+tTmKeUTqK/5U8j+bKLH5/IOvESdNeLydSTp0v4Sxxfwwz
+FfhUTXW/SmdW3z1KRSWrO70=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client-all-the-oids.pem.digest.sha1 b/jstests/libs/client-all-the-oids.pem.digest.sha1
index f26c9661cee11..47fd432582441 100644
--- a/jstests/libs/client-all-the-oids.pem.digest.sha1
+++ b/jstests/libs/client-all-the-oids.pem.digest.sha1
@@ -1 +1 @@
-71FD2BBCC95D54BCD6BCA7AEB1E82A0605944A64
\ No newline at end of file
+F01FF9A9A4126740BF256680D0E2AD86AE4543CF
\ No newline at end of file
diff --git a/jstests/libs/client-all-the-oids.pem.digest.sha256 b/jstests/libs/client-all-the-oids.pem.digest.sha256
index daf94063c5e22..2a9facbe494f5 100644
--- a/jstests/libs/client-all-the-oids.pem.digest.sha256
+++ b/jstests/libs/client-all-the-oids.pem.digest.sha256
@@ -1 +1 @@
-D4A432248459C54976437EAD1F2C970FE9448A687492BBE9989C13FAF3510065
\ No newline at end of file
+F43F108A87484B8D256BF53482926E1B338E23CC14E7ACC3B5E11978321F4859
\ No newline at end of file
diff --git a/jstests/libs/client-custom-oids.pem b/jstests/libs/client-custom-oids.pem
index bd32651276be4..c417c95c16bb6 100644
--- a/jstests/libs/client-custom-oids.pem
+++ b/jstests/libs/client-custom-oids.pem
@@ -1,53 +1,53 @@
-# Autogenerated file, do not edit.
-# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml client-custom-oids.pem
+# Hand-generated file, do not regenerate. See jstests/ssl/x509/certs.yml for details.
#
-# Client certificate using non-standard OIDs.
+# Client certificate using non-standard OIDs. DO NOT regenerate without consulting the Server Security team first.
-----BEGIN CERTIFICATE-----
-MIIDjDCCAnSgAwIBAgIEcLf2wDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDjjCCAnagAwIBAgIEUS1DzjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBmzELMAkG
-A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
-aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD
-VQQDDAZjbGllbnQxEzARBgMqAzgMClJhbmRvVmFsdWUxFDASBgMqAy0MC1ZhbHVl
-LFJhbmRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+L/ayb/DnKq
-X5wjq2N9gbw+XhJAOKw2LJiCkHhcYbC+4ARO0DqKU+ptBLncKZ0BpBYVCSFIm//u
-gapPHEtXNLztBj81UV2aHOwF/XgotCIGwkJxVPKALO+87xYQ0zPMKBqP890XPidC
-d0KY/ItV36JOAzKa8ZmNZR/ChZvDMClT4iHwpEQ6FCMGaXJTqBA+vNiIn+tIc7Y1
-ZHgA3iTww+ruKC0u2pQdla+O/ImL/EWxCDtYcwKC4V64MWJ3RliUPoP88EB9i34g
-e1EH1r37QB9GRP4iX27TVZ07+cplHyNoOiVvZ+tVEzfQPDCZfxHYSsCOIEXxQqCJ
-Bq2txdnl/wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA0+Wuquxc1ajfPWwTquuL7
-J7pHP25542R7IdBKHTluWbVFVuxaGJ6IiE3uwfYNhWOkixRglkVZKfWXQT5e5hco
-DY2QE+pagJ50eTlG5g6BwH3O96q/HgeHegWwgB34IXh4n4m/B9+w+GDvtW1cdRzN
-rVphbwko68EwMZ1gSdRxei9zbCafKLbaUC5/obGZDkGipyMjD4abHBXfKH3VMM1o
-Kf6EdfXHJlrK8NhMrbgbLhu195bWwLKqOztsbvUsmP1u4lqeEX78miIVK+SgifWX
-TVT5DDOOSH9Xr68v1GRRRml15E4252qhQtDdowxD4vihKN1DsgzESvXDEpQwUYtt
+IFRlc3QgQ0EwIBcNMjMwNTA5MTgxMjMxWhgPNTE5MjAzMzEwMzU5MTBaMIGbMQsw
+CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
+IENpdHkxEDAOBgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzAN
+BgNVBAMMBmNsaWVudDETMBEGAyoDOAwKUmFuZG9WYWx1ZTEUMBIGAyoDLQwLVmFs
+dWUsUmFuZG8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKIw6RslLI
+yML6K+UkSzJJvcWe2DkJsLrjxTehzJuP1fQL2uxh3NqBfN6/9ToBMZBRiQpxaPH8
+jbYcyddCP8qM6jXIpsxVMwc4Fa/+SVS3StnVD+Kne4i8EBhy11xnlUIfU8Vxvu61
+oiZnA/xLML2dLxi2gKNvVk+HIaqMqhdND6llEr0uJP8pQwToiSO0qHCI6XCQeeuI
+6BAiTS/VDQBnjtRjDjn0PGlCz80EgINK+rsN+y7E9Dy5/P0a6Mqqg51ylh1shVpI
+BShW1/ZItH6SNUaEDWIO6Zpf0fs5HyraGce3/wWpqpMbE1jj70aeFDAXDntkMQlZ
+ruzstg7UqIwHAgMBAAEwDQYJKoZIhvcNAQELBQADggEBALAh4waS16TzAR+faIqw
+1UqGPNGM2Zxvf+fbiYkNq39HipTvZOus5fNlKrakzbiK10uB6KxeCNzpg1TbLC7j
+kD1D8UOcVc5E3Di39A76pZ8CssVsl6+BB4BZlN9gj/R7mw7oHYICsWii9lfm1KEy
+XQp+Wz5C2lZiSm/7J3aJpfP/JVcif6YSx3Yn0XkDfR9Co3+YwdV7p6YEdqsrICO5
+JMZPgYHXVJhPvzx1WC1UCx5MY60vbldPNu75N4En/XnnbJLc2RjQYA6xuCjYVCQe
+UtMRO5Pz9UxM0n9/oFa8SIxg7tVwp77SlK0j1kqpP2WIOcSqMAcSYJuNBD8bu7AP
+TxE=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn4v9rJv8Ocqpf
-nCOrY32BvD5eEkA4rDYsmIKQeFxhsL7gBE7QOopT6m0EudwpnQGkFhUJIUib/+6B
-qk8cS1c0vO0GPzVRXZoc7AX9eCi0IgbCQnFU8oAs77zvFhDTM8woGo/z3Rc+J0J3
-Qpj8i1Xfok4DMprxmY1lH8KFm8MwKVPiIfCkRDoUIwZpclOoED682Iif60hztjVk
-eADeJPDD6u4oLS7alB2Vr478iYv8RbEIO1hzAoLhXrgxYndGWJQ+g/zwQH2LfiB7
-UQfWvftAH0ZE/iJfbtNVnTv5ymUfI2g6JW9n61UTN9A8MJl/EdhKwI4gRfFCoIkG
-ra3F2eX/AgMBAAECggEATKgVP/PaWdp5eJZuov8We3pcb8+di0L2qX4pf5W1eNWf
-EeJlqiZQquhLRvEmWcnayfVbPYP5B2HgoUwGO0EbXHU3NLb/vVsj1zjds1J+I2G1
-/FUiMktXwyzj87b7j7QC7/zQ5eR5jGOYsaOy/v0QBMCzJjqhDNez/Ax4YVEx2ncG
-qFXQiVBwVtbmNFa6y4G/3SYbLjTmnkRUvjgUrWO+3Y8cHGbzugGMKhmfQKElMnRS
-Tflrg4WhtHHaI2gaIpmVHuIw5MRoIs9rMUV9IVfLrcmG/KjxVha+LlRAJFi0tMCC
-kKSb9YLS4E/uFAAWaW5qmDfGEU3gPT1oYwsAob/wAQKBgQDcwrxj3caDgnFWPh2z
-vy9e08Pvh71V7kJDVDoqrZCRlduLg/9qOP0sPHSRzWARr8ND8eqZtBv0lDvhAtfW
-w/flt0Hon0M1QTIMpAr9WMWrCz1g7RPvpgNwOVPmOi89Jgh4W6zICyRHm75NOSbT
-zO9HhEzEiKWu9MUorbTly4DyAQKBgQDCr5iHwKGPxRCbBx4nXhyPTCowI5L1MpS8
-n8mZx2Iz+6vb9JqWL7qMGDJ05py3E7m4HK7E3O+dGc9SVDLW5WDsdm2BqINZRSI+
-VhZRVmdZOiRYM4ogHexKqxDZzGWpRy9WhOywT1bpB56euw4QNsyJQivwqY7LKFZz
-rZGlESrX/wKBgQC1ErmphE/+sHC8F4kPVULKmhH9l64SHLKXPS/Ur7aD45JHlkZa
-31GyBghrSzSAbVVRls0hs5y2eRvATS+yxdzAvBbAclmTBd3Cho9JDJZd3INEzFDs
-4ZOyaiNKFPGIS3jpF/DZyGHCpplqB/W0BkVBuN+q2JZlsflBi3F0hv5GAQKBgFWR
-omQAacJsJjlNbpZ1Ce591okkCaKV4cPqI0zg0qvwcQATSaK7Ra3vsLWq6rSZCeb/
-TF2gkfyl7HbOJS5I0Un9X3I+o8Sn2z40zkhfxrr0ECdoN8yX+lRzTcu8x+4YKJlJ
-gVKY/KUdwT0ZEDxWKZ4mpHN5wxZOG3lqj3pSylb3AoGAVMf7yegFepBN0+4njyq/
-2XETNhk2MN/oplWy3XIQh8tL+KaFeFZCEmjPS1impLzg725CPwuL7/T+WOvx9zvx
-aJUY2no2nqsEF5p/ov9r2/Yy9uJGHS/b7dgPwHcwf4/uPcrq5qlStRQNGI/yqR88
-kebD0hJpUTbpKfsb0j/WI0w=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKIw6RslLIyML6
+K+UkSzJJvcWe2DkJsLrjxTehzJuP1fQL2uxh3NqBfN6/9ToBMZBRiQpxaPH8jbYc
+yddCP8qM6jXIpsxVMwc4Fa/+SVS3StnVD+Kne4i8EBhy11xnlUIfU8Vxvu61oiZn
+A/xLML2dLxi2gKNvVk+HIaqMqhdND6llEr0uJP8pQwToiSO0qHCI6XCQeeuI6BAi
+TS/VDQBnjtRjDjn0PGlCz80EgINK+rsN+y7E9Dy5/P0a6Mqqg51ylh1shVpIBShW
+1/ZItH6SNUaEDWIO6Zpf0fs5HyraGce3/wWpqpMbE1jj70aeFDAXDntkMQlZruzs
+tg7UqIwHAgMBAAECggEBAKVtaWf9u9iMzV02fVJlpCNLhydHp+5xT5JG/g+RxyJB
+JHhrU5eHyt+8VQReank9mfHQqNZF3/0j8Q3AdkXGtTr8FsY/E+0KVPcmcKE4XHeh
+b4L6fDE7XZ0jww6BiCNRuQqwYV+EthG6QZl/XoQewJ+aQqxhvN/KkE10VQLH1Uf/
+e3kBs5JnRzIHBpvw2eWqz0AEaG/dGmlk0adS50OCd/9Sd5fj3GxtBNtIwcxBXCul
+CJOlM8AiZ7cCAnDhtmlD13w5rCrv3m1dGQ9uddTMrEqzwLzvI7pHjpihuvpxe4AV
+c6gGtJi9rrIYGq89UFkViZMzrXF3ZOB/Q+QVMr3fJcECgYEA74vZ8B4MuYNZxwIb
+sQE1Pc28fMR7+ahxean1SoMgUnPVvPUVM4Rb5zmb+HM3qwuD8pjePeCFjI8JRMuY
+BLwbS768AhN2PEDmHeZqGW2+ejy5miKWJQkw0P2kwNTFiXWGQMJudwCVuLLwPRZz
+/+BF9+Y5LB+61s01x2yt57eC1ucCgYEA2AVnnKPn54SrvRYi4o3HMGhT04Z4ed3o
+mHIH4NwtAyuUSYyW4sznzLycp5YwwNr7VUIAAVHDFJDgRJClzExvN6MJMz5Pm74p
+1JyLJNJIAdOIeiuUQ72hCDIATnO8b8eSx3CyZk/pMAkX4jDdcdSHxYe4Q0lYY28w
+r4igqlZyneECgYEA5dS5mm9KaCeFWLJGHVL6YTzm7pKaHFQn48JUjVQ4C8QFu5Et
+8Uq53qTgZ8UsERkwVO+ks5uaptyJ2Q654TkVu1vaCOfhVjgyUVfETneD/MYMkb1D
+b29c/feOPlEm4hb3Y2TpFZZjEF3mr8W+MC9fElU6X4JmAjfLtHYqeJsSltsCgYBu
+1x8Z3XQRdB6Wr/QIYQzyhqV0ZIwOo9FD30axlue1t7enoW6OQusxPxn2V3b+jZ3m
+Wi4cfzJkrA2WwM2BrTpnxszisTcxx3o5MHWo2AjAfySI4zF5LKSiyt0jY/ktNa7X
+jLjNDHWvAwtxMPd+/7kGnqPqSokCxDur5aPioua+oQKBgHZpMxgTfBig2ZLQlb3F
+5d9WDstdnzntJ+xh0Ibvpk/fvu2xdAtqxwdxBf/pfS62urJt9QrmkXrf6lS0OWHC
+g3vzYidCoffJfPSXq+QE6E4cypyXVCZ3n8ZDVue50cZ6aqwmQ3VZqgG0Tj4z9lrb
+bwPuHxWcD061D1qE/+PNx00Y
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client-custom-oids.pem.digest.sha1 b/jstests/libs/client-custom-oids.pem.digest.sha1
index fb28c8fb8239a..d57daf29cb2a7 100644
--- a/jstests/libs/client-custom-oids.pem.digest.sha1
+++ b/jstests/libs/client-custom-oids.pem.digest.sha1
@@ -1 +1 @@
-FF12C0191181574BBD7C455E746D32A427D746C0
\ No newline at end of file
+C13A7E8D8AE02F2D797C2764C4046D30CB7417E4
\ No newline at end of file
diff --git a/jstests/libs/client-custom-oids.pem.digest.sha256 b/jstests/libs/client-custom-oids.pem.digest.sha256
index 6dc0e47eb01a5..f18e2d865c2c8 100644
--- a/jstests/libs/client-custom-oids.pem.digest.sha256
+++ b/jstests/libs/client-custom-oids.pem.digest.sha256
@@ -1 +1 @@
-95A5EF3F2960CACE68851F62DD1B991758A5858288A688278B72DFD7C1DD0DC9
\ No newline at end of file
+722F166C90CF8633B2896D63272E4C908F6BAF2A1A57844D3B949805E63C12FA
\ No newline at end of file
diff --git a/jstests/libs/client-multivalue-rdn.pem b/jstests/libs/client-multivalue-rdn.pem
index a97355e002f6d..cb28fc2f5e155 100644
--- a/jstests/libs/client-multivalue-rdn.pem
+++ b/jstests/libs/client-multivalue-rdn.pem
@@ -4,49 +4,50 @@
# Client certificate containing multivalue RDNs
-----BEGIN CERTIFICATE-----
-MIIDUzCCAjsCBES9zCwwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
+MIIDUzCCAjsCBGucNjowDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP
BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK
DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0
-IENBMB4XDTIyMDIwMzIxNTk0M1oXDTI0MDUwNzIxNTk0M1owaDEyMA0GA1UEAwwG
+IENBMB4XDTIzMDYxNjE0MjgzN1oXDTI1MDkxNzE0MjgzN1owaDEyMA0GA1UEAwwG
Y2xpZW50MA4GA1UECgwHTW9uZ29EQjARBgNVBAsMCktlcm5lbFVzZXIxMjAJBgNV
BAYTAlVTMA8GA1UECAwITmV3IFlvcmswFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIB
-IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6Osw6+wVACn7qj/48i7+V+zm
-h0c5IxH4ba8a640fHOnScUlpWz4UyNiBIyF36ITTH9KJoXdcQWH51S5iHUrgZUbq
-GaSUuS+LO5PRDu+55W67L6+5Y1Qq3swCLeYi5rxOoMiHALY8ert6agAeoTSPv/AD
-3Us9P9n/H83qneFBHdb8yjMeZj8hiCIt8mC62w79pJCLQyDHiKKeSFtEqBCAhD3h
-yGkb45pjIAKUevbEDZvCIhpzT5FuwUgbhWVBuoEoX5apwA+49u0Ots+jbxcJSTkn
-y1gFJmu9PF6lvqwNq30jATvarFxHYB2BZjLRbSKH4TE0ITBiIbVL9H7tKqhP5QID
-AQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPxLvlCIZLq7uW8ok74AC2MG+2TmmIyZTQ
-m/FRA7xRfDtueMcN6Zq49DrbuTtOoLC6tNt4X3o+wU+RTGoiRwR4LBltOF6dUz24
-4KbludShxMo2AsWELfRJCKGnZOi3WP8sA+nenSNPoWRzZkw4Tn4QWSFyzGOGIS77
-eTCkJX4BnQwMoknINUkxWiE4/AITe06hafA+YBW1keJUd7ouOjdCP89EVriR+p28
-OyoQyqvFwW6+gL6/iL2V+o9seP6b4vn7Rn25sDRoHeJVzUP/SyZVka5wTh80Pk3x
-OOZYWg8+enNza9SVAK2ReNNjaSlt+nByoZNjLOwnJQ6O7sDviINI
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4RDvOBSVYF9NjSpy1uOXXrUs
+ggmy1MLmdNZRXl7zP/npT4Fa1PP1gfA/O2PJWeat3btD5MS9n6VnX7Z8qGuqENW3
+doMLuSWqb6FbHQk7o4qWmK6BfHjbHSrpshosO3Fz4oLw+wRQyJZqFueNLRTU9cMS
+bOi45bzr3L1+0fhUI8Kifb1bM8TTeljgRXk7sukRTQsVHIheUxdfOP4d4kxKzTeU
+nssb74RzO3ME5Jmk6+hLw0aaMrK+3Vv4wXg++6Xs9XLfOSlarFq57SkMvpyDFfEE
+g8haNM8Mw5Nr8QZI4YDTfIXek7Hpvdym95mLh8Bj/8LgyotXaHxQTbLTuyIvOQID
+AQABMA0GCSqGSIb3DQEBCwUAA4IBAQBNQ+33kFzFgHT87EHh9mynDI8Cmrki37vj
+3WdrI+SnvnaVCk5AUX9TCHCGxCYUoXeLKqVTgZVYizWV9TrBO4qRUEO9ckBFgk3K
+o7jIP435bsRMP5UiWMELjYaL01vhfIq4srVicNE9AiLJXzKKQQTpP8zD+KGxcOAY
+3IHDZzd6muYoQ+bok7qQlc5VVu3rTSJyDDEOj96iuTTbuhNKPKjWi4BHSnW24t6W
+dsA+S82CMzLYgylSz67Ik1J+rysAU0InHTudjuU5j8xqBLTLF14CnKyPivhDv1+i
+OS3QkrUVQ2cVyiwdFmf1Zhw9KNk0BdGzdstElo9zZXARP2yu/2X1
-----END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA6Osw6+wVACn7qj/48i7+V+zmh0c5IxH4ba8a640fHOnScUlp
-Wz4UyNiBIyF36ITTH9KJoXdcQWH51S5iHUrgZUbqGaSUuS+LO5PRDu+55W67L6+5
-Y1Qq3swCLeYi5rxOoMiHALY8ert6agAeoTSPv/AD3Us9P9n/H83qneFBHdb8yjMe
-Zj8hiCIt8mC62w79pJCLQyDHiKKeSFtEqBCAhD3hyGkb45pjIAKUevbEDZvCIhpz
-T5FuwUgbhWVBuoEoX5apwA+49u0Ots+jbxcJSTkny1gFJmu9PF6lvqwNq30jATva
-rFxHYB2BZjLRbSKH4TE0ITBiIbVL9H7tKqhP5QIDAQABAoIBAHUsA/NbU4E+nY0b
-G5hyDZ+b3KjHKrY6zxgWk9tVpgY8lpJJFQhCpeAYEnbi2liNoUwL9RLWYgG+URlu
-eQs1ZMduMkxuICc9BLUW+n+iF4mU9/PYWdHfQKXOSXZfpMUgjAmUd13jT2+Kqt1y
-a/Y64+nxy2/i7tAVUaPlShbcf46LZ8Pjz4z25W+iSkPN0ACwbLDNZu/BPDzXGVgd
-yVBSZTDN2w4dOeeQm7HAGIdcuaRuAhPU3eonsWVHfobuD9p3XK6uUWoVvLoeei6r
-OH7bVPFSmcSOZlxLL1yXfAffPGswRlQWKqL5mHmBgkjm6FEyOzp08StqreG6s3+E
-P05pZw0CgYEA/jjO4GGgHph3lwGqmIkTAH7cFv4p7wDJjN8KQuH1riUW9YpEjHVK
-7QldA3paPXjsKCsJrTwB9nYnqOUqzauXvMW5ib8eLhn5oaj6qWeNu7HlxAlwiH9E
-AyEUrHho3jNASeRLe2/Ui9KIDSwqBk7EiyE0Bo4Xfr/FSr732hBj9BMCgYEA6ow9
-Lnl6nUo4751G/zoWqIIpFaU9UeEkLRloOnLkE1DFaaVICa7BPOe5quEPj+2yhlws
-HaYATAS1P1nlt43dodClGcnlrd5E8nAV4cd2Q+NBEldwVeyVnD6gybQ/7/0S3gCJ
-+pRPbRKelFDSn75FyeZ2a+51uQGqBKeuU9FTeycCgYEAzRP+rzuR85yDPKn3eL17
-OrcbzOCT613zOVTj7jhh/G2nK8Syr6wfGUCobBnTmitmNrEhSUJTQwLl03WrJeb6
-rSnEFr66Fe6WVjb+npIO4A8OjyoeQb6Imw2go9Eup7Eod3NXAOihyXm4jwfFjvkT
-zDiow3D0uybwfO+4+YynVUkCgYEAjYIW15dLNuK0/zwwvYPab8g36WtMV74yCVOu
-4rS2jkDJGjgyAkWBKLHV/xbSQM/0ScQKicjBnRuqDpK6Wcgp05sCQVDiVcgoaOzf
-Bt1EqSjO9bXzfKPEkAPpFki92pkhbPd+R8R7Nx9otasdqGsvx+RXxj6UoA40+aIB
-N2ivqTkCgYAzBFlob7cN6Dn0fSYkOGx5kVDE/A2X/qHMs9b6Z0qCc2hWrcEuVQra
-3SMcrF/STCrdlkO84GuolzZ0j6h8it0hRUJXIw4PblMisozaD9lmfwFdzvbyU8+q
-4kjthONkJjFsTyz3WOcRTXTAbj87U57WS3E3AApRw2lGtzdrkDPERQ==
------END RSA PRIVATE KEY-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhEO84FJVgX02N
+KnLW45detSyCCbLUwuZ01lFeXvM/+elPgVrU8/WB8D87Y8lZ5q3du0PkxL2fpWdf
+tnyoa6oQ1bd2gwu5JapvoVsdCTujipaYroF8eNsdKumyGiw7cXPigvD7BFDIlmoW
+540tFNT1wxJs6LjlvOvcvX7R+FQjwqJ9vVszxNN6WOBFeTuy6RFNCxUciF5TF184
+/h3iTErNN5SeyxvvhHM7cwTkmaTr6EvDRpoysr7dW/jBeD77pez1ct85KVqsWrnt
+KQy+nIMV8QSDyFo0zwzDk2vxBkjhgNN8hd6Tsem93Kb3mYuHwGP/wuDKi1dofFBN
+stO7Ii85AgMBAAECggEAS9edjTvedHnZE9N0nHZYSXlq9K0Y/rpo14FrLQ55ERuB
+pCGWfsFw/2b6n+RJ80oZV+llgR9NORGPasD/c+/IAOFL8BtL5YnMS87HedrlJoCq
+q+vORagiSktuMe7A7g0WvINcenIjWNzbBjnkKIdnFiQ0vs+TkxNdNecDZ/UzQVMR
+EQCVrLFxwGeeQs8mgPErl4BEexNH77Pi07i511/swFP06kmOzxSI7uknY83vaNsr
+s4XrScpMxszOCcJf2857nUPYZSldnUJbvTjzDxKvn1sHpcvnlSdP5AsYa8i9UmM4
+d0FzUmywOCRmwhFzhPdNETDQVHNJj3nhD7rpw70bowKBgQDvcTkhniVYyQYLqFEi
+LRRVDAUy+hQF2uToYpId4+wSD1tdXZr25uzBi1wAb3wkSIxig5MItYAQnMQxS8H4
+Z9hmtsTNe5RLUtkt+t5rKDa8MpEt/0u7sKfzteTM0SPlAz8fQV1xIfrnYL0R3jy+
+thidEUfn9f1EaNNvKh8TMbsm8wKBgQDwoTcCd8RNJ+dfSzEXYgz++AZmPilnebBR
+BJjsZX/lF4y3V3ZMIB083DCnszb6l63nxMTBYlU2RRJ7C7AxxJVIHZF+WahW/Fq1
+TiGpkXZfyLkT5H1kNuH7IvcDVmAu/BfzsN5Ej6yC2wDpXEqWmV4TZbE1wQKApgy2
+t6VUyHO0IwKBgC7NSMeDa/Vgxae9rK4rdY/yG4oNS6ChWqD5s2nYRSp6ifdD9qhQ
+FvL11HPZVsCY1afj4/eN0oxsuASStEVjtu7MxscLYr8eYIkWQidb4ucCU1JUVm9H
+ZmzCnwhR0NxQuCPZ2PPmGm7jf9FgSStV2JXK7O0wLeMTQlC2QQC1dcl1AoGBAJOc
+og0+gUo/f1zi2HOar5Q9fFd+LJIQgUvCATmLLkDQCH90BNrAHI94F9TYSJLDN2hl
+ObnT46gOCT84NVbiXB1IHjefMnhiCBcOnfHSjQZHMNn2IcG3NTuFAHiopQpNlTfl
+uQPgCxie1llRR2RJIv/NMz9hbnKS6luwHpj6+pd3AoGANwlsfpj8zn4mwauqsSK9
+13cauws+HcI5g/vUda0Y/LreJsN5ELrWL4UQCoMthQSI05hYpBAxkLcvZsMJ1hs5
+Y8e0QukC7n6to4KNMYY2yiRjBitdoHjFKurWgrrUFbtrq6IFiZ18pPVX/1JH+oBa
+1IsF59mIpZ+JJCi9W/8cGJY=
+-----END PRIVATE KEY-----
diff --git a/jstests/libs/client-multivalue-rdn.pem.digest.sha1 b/jstests/libs/client-multivalue-rdn.pem.digest.sha1
index 6e49497ab0277..5a051aa7d53e6 100644
--- a/jstests/libs/client-multivalue-rdn.pem.digest.sha1
+++ b/jstests/libs/client-multivalue-rdn.pem.digest.sha1
@@ -1 +1 @@
-C544D25C0899ADC557BE6D274D47F49BD1686886
\ No newline at end of file
+2B638F696EF373BCFF1E9F911661F251DD460511
\ No newline at end of file
diff --git a/jstests/libs/client-multivalue-rdn.pem.digest.sha256 b/jstests/libs/client-multivalue-rdn.pem.digest.sha256
index 30404cb1f297b..2ff001b6501c4 100644
--- a/jstests/libs/client-multivalue-rdn.pem.digest.sha256
+++ b/jstests/libs/client-multivalue-rdn.pem.digest.sha256
@@ -1 +1 @@
-85548F4F4C885AEDC9B4C12C10EA3C80F1F2B3F77BD8DED940868C6B3F5573F9
\ No newline at end of file
+BC73453E12FADF69B3D40D3B19804A534945247D54F9B38C5BA8653D6549A633
\ No newline at end of file
diff --git a/jstests/libs/client-self-signed.pem b/jstests/libs/client-self-signed.pem
index 6ce670e4d2e26..2f64f2d98cc39 100644
--- a/jstests/libs/client-self-signed.pem
+++ b/jstests/libs/client-self-signed.pem
@@ -3,54 +3,54 @@
#
# A basic self-signed certificate.
-----BEGIN CERTIFICATE-----
-MIID8DCCAtigAwIBAgIEJzH3EjANBgkqhkiG9w0BAQsFADBwMQswCQYDVQQGEwJV
+MIID8DCCAtigAwIBAgIEV2NvBzANBgkqhkiG9w0BAQsFADBwMQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzANBgNVBAMMBmNs
-aWVudDAeFw0yMjAxMjcyMTU5NDZaFw0yNDA0MzAyMTU5NDZaMHAxCzAJBgNVBAYT
+aWVudDAeFw0yMzA2MDkxNDI4NDRaFw0yNTA5MTAxNDI4NDRaMHAxCzAJBgNVBAYT
AlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEQ
MA4GA1UECgwHTW9uZ29EQjETMBEGA1UECwwKS2VybmVsVXNlcjEPMA0GA1UEAwwG
-Y2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1n/0VkN2yib+
-jGaSkipaEn2jJ4KhmPXixLVzLj+YxXUN18mZNcA1IN1Lguci/sm2UQnj3Ulhfeff
-IGgU7C2aKXb51OVd+CHSfYwL9Y+SYtojmDFQg9GyZHWOC9hoNhbonziMGRReqOEg
-sI6onb88QwVo0nI3ADeIaiZXPVoxRBdmNg75sKjR3F5sFplfcx/ANGYKm/bjLZgj
-181m8hdOZLdXlE168o/yUlTh4yUc7TodTf6ERjjiQk7yn52XOQQW7aI4qUGpiv03
-Qqa8P7PkMzWx1iT0Hg1TfyexltEKcwfFP6OBPr227Jhl9ZUaAplN35I1mEudRInx
-U9qlWKxQaQIDAQABo4GRMIGOMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1Ud
-JQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRDkjlMAfHAtoiaTGvdpBNKnP9csjAS
+Y2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx+wr8anR1E4r
+QM5uHmtO4aEBJYc1hX2H/jtN7DNKxi6CmTNTN8IkT/awyTXxkEIimD58eLM5Fa21
+/t0esd4x+/h+AdZysnboDyiKGWWftrNEaBgVbwIqN2GI79yerRzdEfqKpxI5LiDM
+67Cx4cOqvU36bY/C/ruCzGeskiBGRpVrWfAaMobuV1oO9oOxdE6OE8MvPWq9KN39
+fVA9xRCuKnpTPaC+jFejEJNaI44jgYlmUihReZJYNmnp+AiJOM0FOF2+wVUOwfF6
+JaFr4R5md3OZNS2MRrepGBRBOUQ9F+RGrCwoTONRUWgXEj8JcAI6QSpSsyv4PeW8
+xLokkHZNxQIDAQABo4GRMIGOMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1Ud
+JQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBTCeps5ZImYVRzQlmQH5xBTaENN+zAS
BgNVHSMECzAJoQSkAjAAggEAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVy
-YXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAQEAQHaO5NAKaGkUgQYn
-wuQVfh5qGf41MRosL3gRZPDHaVjrAV4Rp5jytJTiB14H3lLS1pvEeN/06cSoXgxw
-/P9JjrkYXnNt5h+vZBOomfhZAsGSVQT3eRNcgpNqbVZLaN3T9+wl9Uel6Njg+iwu
-SW04b2L4x84+tJEX02+0u3yXq1vo42dQX+YY/VuEufmLZTLe+/AzB3gFKPzCs6ua
-Lj9u2UVH7nLzn1IcqbUrZyD4MjoHj4brWkR5mWoIpjXSihS/g8EK2ttIJ9gHKi5J
-7pA6ebfUjE8HLwKjY1DTrjt5oye6yqG+zzuNLj7qXLpM2iImG05K6WjcwTUy8Zv/
-7TyUtA==
+YXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAQEAPXZo/1KXHHmRGVyb
+KN4zLu6RRE9MrdK2K2EPEObYr32/3WTM9l8y/ioP3bhovmpK74/7Sa3WCAnfDKQ4
+est9SzEGbyWogYgtU90g7o4v/TA/knvzIXPw1iGeOYjn+v+W5pLwjgF1ay448xB+
+14sDSqS0LuHg0ZgJJ5dxSgm5LMe9HUAZ6nckI4epMV0+cSORSMpcvRvQp5Ql2Qbr
+tQKOVd/t4WXdN+s//tKlcS7I91e8+rTNEavOqp1Gju5RwQEPRhU1+mgavprbgaBM
+OUa0qbUoHPi/cm99O8FuhjHu7cHkp4CuxCClIHsJitotd4Nvf6D7xCwq5toIGeW+
+p5ob+A==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWf/RWQ3bKJv6M
-ZpKSKloSfaMngqGY9eLEtXMuP5jFdQ3XyZk1wDUg3UuC5yL+ybZRCePdSWF9598g
-aBTsLZopdvnU5V34IdJ9jAv1j5Ji2iOYMVCD0bJkdY4L2Gg2FuifOIwZFF6o4SCw
-jqidvzxDBWjScjcAN4hqJlc9WjFEF2Y2DvmwqNHcXmwWmV9zH8A0Zgqb9uMtmCPX
-zWbyF05kt1eUTXryj/JSVOHjJRztOh1N/oRGOOJCTvKfnZc5BBbtojipQamK/TdC
-prw/s+QzNbHWJPQeDVN/J7GW0QpzB8U/o4E+vbbsmGX1lRoCmU3fkjWYS51EifFT
-2qVYrFBpAgMBAAECggEAV9fI4Xb5c+oqPqdXCSLtBjuLkIev1CuGddZ1WBBVaS/5
-vlBiPR/84fuei/pKW5uR3Xg2aA5ALcbCmsvlHZ/DTY3a3HQeWprHUAEFeMgWRANf
-plkzxvgenYOC65jxfI0/MM2Amli8N18S+xtBNHD3pd7WQbDik2UqqYYQHiEMofxP
-PvE51/kuPY3F1nzQot87N+EINSRrkvzT2S+4qC8+QJ8Kocn8i+ETuESoWiyXAAFT
-4e5AsF7QiXcTaM9T/SjysRrzLkyXuGimNxhlygc1goaRVSwSdgwM81OckMYmBjcd
-FhZGCDOn5LKSl5DAi//zwaKqZ54v7dgPbHx8YakkDQKBgQDsyy7BGwZgCNrsEaUH
-JLVF8bG1jQPmXfmBGJrERfkxUwMcPzhBqXsKaJDIBv2yh/82XW4fytjfajxhT4ej
-XZshOXmOKWd63sh4MFYWmbVpoWW5pll4PGcE0kpyGugzGM6Y15pZkf+x+GZvECKQ
-KPn23TJBS2Kaf9wjUMgvYPbgvwKBgQDn5dvkhWP3YoWrzEV1PTgzNVHZDt3rhgvm
-MUVojlb/A9xu+/NZkIcOJi2qZ+aodLK7y5Ugjae//x3hnguWFyFY4hIE67HzB4Dx
-PukZiRBxpOq+uHdUmfHmGkX9XrchFJn+lQs5QkwSvjQge3HBWLxAwp3M9ic2+x+8
-5Rzs935w1wKBgCdchwMWiPLBxhJjxHjxgyiDSrURIcrTaDwraN6jew7V67hwUduo
-XomWZCq77sQUkznoQfwK7g4FTNAoNjXTw4u8UBZvj4H/Mne4ITdUibFrYMuBeXHh
-KIbGphVdn6eOwhjqDgBBoq1kyzI/Dl/ET/jXQBlWfKeOBXvhW8V0atVlAoGAZnyr
-5h8dwyODuB2bROk9gUQ7XBa59XFUqPN1nXPq9uGZ9mLbdeXuCk7NN9abli+dHmon
-CjAQx0XUyvWyYS7vyfx/wjT4fFQApJ4NHv/4iIE+TfPwqS8wPWW3MPc/MBuOw3jT
-cQbf4Bi3qPNlnvG8oVJhs0fGpQHvUOhhFEl9VBkCgYEA2Ush5K/bG4b2DDnzDFxT
-whAzUYrJO7/6u1N9cTvXmwqVkBFtnjA//sPJesPiYiz0nYFHtxco/H26hdY6vP8A
-xrlZiUZu5rxF1BgMdjl13NQUWdyTB3VHxKsNFytUijRbpZsj1vBZgh2IiQNVuzf3
-4m6vQeGHRETrgkJuX2sCWSU=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDH7CvxqdHUTitA
+zm4ea07hoQElhzWFfYf+O03sM0rGLoKZM1M3wiRP9rDJNfGQQiKYPnx4szkVrbX+
+3R6x3jH7+H4B1nKydugPKIoZZZ+2s0RoGBVvAio3YYjv3J6tHN0R+oqnEjkuIMzr
+sLHhw6q9Tfptj8L+u4LMZ6ySIEZGlWtZ8Boyhu5XWg72g7F0To4Twy89ar0o3f19
+UD3FEK4qelM9oL6MV6MQk1ojjiOBiWZSKFF5klg2aen4CIk4zQU4Xb7BVQ7B8Xol
+oWvhHmZ3c5k1LYxGt6kYFEE5RD0X5EasLChM41FRaBcSPwlwAjpBKlKzK/g95bzE
+uiSQdk3FAgMBAAECggEBAJSDELHY4RLBbsgZkRvvww1BOUTTiCK9+cQV8fDAuY83
+BbUgt3T4N1lffDw8YlIzjtdhs71GBeXgwQ2u3RpsYN043wImd9WzFvgLuWrcXySt
+Tue03Fc2WH4DamIklYsrod21LEKn+uoVW6TKAZogbDlsL9grr0LjsGp/qWZzq0DK
+dCg9FmtasYsEpTaIDbgtJZO4NjpjoayqGbBACKdAXTMNTckttdFvq2mi7MFuY7oa
+RP/DdOaD70YGm2/89wbrV3B6Lnhv3lrj8P6CJiXmRvqK12DYtdCWgdS6RWoV00b4
+qoHUIg/BawoWMUo4b01x8P6xqWvKT4OJO93akFsOtIECgYEA/j/YZVF3dtpSzZCp
+8EO6/g/ksy/99L61+sxylW0q8BAKBar1R1laMZlJOtdoQlczsTlAFtGzVaGmShHj
+sqh7N3646vIYl1qzSJuMmBuiGljvb5zMbVEcGFARO0aUDgAYP2I+ofIkoSPRkGt6
+E1x7K7hz8SeYB0msKkC4welDKqkCgYEAyUyRE9fyb4dYJkTSOSoKejLeqeRbiPRp
+TrcKaY/SCB+kUJFTh4aXPIne9uk7PlzY1euJBwzJNbl6m/kdNUdXbZ+jRsPzK7BX
++ze9QslafeFiDpGVxmwRNv4sPOLKcqCvGXhAQCUXRFzH6omxHliC4RkdfLaHqUhl
+vvxIqBj5t70CgYBWlQtuRXX3ZZX5JyCYD5ioWGU7mEZViHSUefczZ7/NjMi88WEh
+8Q3EAj1r6ls47FVQLvziSHtX1/q3EqyF9NYxKdhzOgqh0GNpgH86dX6YllzDl5QO
+TibRKMMVeo2Ezwdy3lQR9lH/BiGhmtgxq7bORrxFDYS6Rp54rR29+1/CMQKBgFXp
+8uV4PkWxi0LSDrNNKSmceoIiL5sVTbjF0JDbTDYhYxzr2a23GOdCpMHXK2zjbbxn
+iZVTtLDUV+sn+Hpb14m3H5W9XhTgb7yNvp45mACv6Az1v+nvB63j73eRB/zCbdk+
+BJYb/oEz5DNKzyh3eGygLoCi2uW6O4q23D+6YSI1AoGBANGK6K4YhIf+uZQwtAJ7
+/vwgTiaBeidR/cjzI04JZiRAb+CW730WwX2WwkbwSwmbsDePN6nrppGr5Djek3L3
+mFoP7EdBgubg8LLmw7UPI76M3yn177OgkRBYSPzWITJdwXkIOy/LNgMUISY895PQ
+mciJVC+m5TcFzm+WIycVahx1
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client-self-signed.pem.digest.sha1 b/jstests/libs/client-self-signed.pem.digest.sha1
index e4911a6c71052..e47e271418edf 100644
--- a/jstests/libs/client-self-signed.pem.digest.sha1
+++ b/jstests/libs/client-self-signed.pem.digest.sha1
@@ -1 +1 @@
-F65F9DD67A60BFB5AA7CE23A0E39869EBD23694D
\ No newline at end of file
+784240744D02ECF6DE86268DC036B34F1D5FE454
\ No newline at end of file
diff --git a/jstests/libs/client-self-signed.pem.digest.sha256 b/jstests/libs/client-self-signed.pem.digest.sha256
index d979ba81508d7..68c51501b6e3a 100644
--- a/jstests/libs/client-self-signed.pem.digest.sha256
+++ b/jstests/libs/client-self-signed.pem.digest.sha256
@@ -1 +1 @@
-902810474F735BF234C986943328A7248C3EB2E83EF7D5528F4A051306129CF1
\ No newline at end of file
+7070C6F79F11C2EB31E67AD0F3B11DA5257019CC6C60BA816416B478A8A10B9D
\ No newline at end of file
diff --git a/jstests/libs/client.pem b/jstests/libs/client.pem
index 251cef82ff5a2..462eaa2ae72a0 100644
--- a/jstests/libs/client.pem
+++ b/jstests/libs/client.pem
@@ -3,52 +3,52 @@
#
# General purpose client certificate.
-----BEGIN CERTIFICATE-----
-MIIDsDCCApigAwIBAgIEfxgIOTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDsDCCApigAwIBAgIEA6zcQDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBwMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjBwMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzANBgNV
-BAMMBmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvcl8IW
-V298VV6n1Vwxx+vrW5UBgEsTucc2zT9so4QCxAWvBWDJEBgRoGXjEx0AFuwgaaqG
-ahfBFcTbYTfyg/qwUbdQk3cBbuJu+kzvCzxUVz0GkIJ+oy0PO1z3lY9C3XRq5D7c
-7C+7g18J91j5xBf0wKf6sjv0Cp+KG9UjTwHc3xu12tJPRU12wo4CqF5E4nyqTvz6
-Fs3L9Jq6n5nwScAjETE7jwV4W6WTdeUlrrxgHHR0yciyfELGYWSnLvvD/uLIE2PE
-I2fwlwpKeZx+dFghx/xTvw+d6pdTD7dks/oSzAiaT53SVJkuvJpyQ45Racs1yxm2
-BKFBNPN2GQVSzHkCAwEAAaNOMEwwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYD
-VR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFFB/REXzQ1TD3BT1LfdGSv1d+67w
-MA0GCSqGSIb3DQEBCwUAA4IBAQBTP+81kpm+2/Po0QEn6ELI7fOFbCDktptVf9fY
-gQbZgClAaNOzC6is8EIq7VPUwPimTTMAgzMs8AKpnnn1XlAxHnh4usXqAQWz2s3U
-+WnIRplFj8bbev0GeDQWK3eQqqvlFVIkFSItgPmTeukmPZe1548s6Dh83mNpy6J+
-1YP3yBjBQSnQTVy/9HdojaaLHRIHf0I+EDDTfJ/zbHZOGlwvthcxlb3Kz/1Ykhyo
-0FOAIaTC7V8+3fKV5lut4NV2JQbZ+r4ZcVZb0cJKE9SAnnhnWxowDlzOVaGya8AI
-dMGWAhUI9p4fAOr1HKR+06unGzN9v+OF2EsFtU/iWYr6sMcI
+BAMMBmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOol5/l7
+63ZuThH0wFA4DwtvKjkHtoXU/YeUcJ8uzwRba6/KJxyK3gsfvSSkJ4QjoOLSd86q
+KGpDzsmIHU7gWTrBpjdK1mbDamiNDPKVJLFpiRYrK4ypdrcKYrW0IZYCKkI94i6m
+U5I3Evu73jTJ1Z5cusgb2PI8TLA+L5QHB2gWAEdE1qF4TYTt62dtHsJ7irIerxUO
++ZCgCMWRYvNt4T52B3DevltCGM2WkCl/guxL2BTCgDB4+VG88Jp7E1Gr+uSkhezH
+dSWemuiwB0QjAuhtbhdyW5AnoDD5auaXPVuTOQyPOCtxHL3EkNA7O9Uexg8Uuy1Y
+f11OEVR3i79wRSsCAwEAAaNOMEwwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYD
+VR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFP48R6f3r85r+VmIL0FKOorNgEXV
+MA0GCSqGSIb3DQEBCwUAA4IBAQCRvacDUQ6iweRiIDsaSkU2DlSPceILQ7B+5vn9
+lN2k2G61u08XbZIgETbwpsE5CvP79RjU147ooUYnHT7ifxrQT2ebk6xtffBKVKgw
+gapXxgVEk9RaLiYHgymwG5hel44euj2hhgMYki4HWOeDS+W8SH20B4BCIpAf2a1Z
+jPHzZsxtrwR4+/nqvvQnfKC5lAdsgmnpR4tOOnCAbyzA9eR4nSaxxzHTOq+vhxNj
+HUZ871yL1BMMB9P+bDuqE3Hg6Vo+oohdFCr6vbzdGPMm95HfVpxD0c6UFZo1jpPR
+Vtr1odloEyoKBfC2Eg/ecijeqcqcLJSXqfaPL2jmkpx5gaGM
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDL3JfCFldvfFVe
-p9VcMcfr61uVAYBLE7nHNs0/bKOEAsQFrwVgyRAYEaBl4xMdABbsIGmqhmoXwRXE
-22E38oP6sFG3UJN3AW7ibvpM7ws8VFc9BpCCfqMtDztc95WPQt10auQ+3Owvu4Nf
-CfdY+cQX9MCn+rI79AqfihvVI08B3N8btdrST0VNdsKOAqheROJ8qk78+hbNy/Sa
-up+Z8EnAIxExO48FeFulk3XlJa68YBx0dMnIsnxCxmFkpy77w/7iyBNjxCNn8JcK
-SnmcfnRYIcf8U78PneqXUw+3ZLP6EswImk+d0lSZLryackOOUWnLNcsZtgShQTTz
-dhkFUsx5AgMBAAECggEAPJH+RRx+PhGjC8yyCAKCdAYp38viYmwp9pbBxOZybvaj
-Z0zpPCiBL6WNEri1JRixttaqjpABVa142lSUPhtAO2vH27+FEQbL+1sd413i6Lnm
-catRpHQb53dvG+Az/6zOP5jC2CqrwkLkdYhwhW8wZC3EUCSccFPCFETkoB8tik+d
-L7Ri+mnKuYeAo0gNzOkXs9jl4ztCeFovobulIf8y1kwHdcPme5TYK7LdS/32A+et
-ZqglgzLGNglucAXbdknuUKXMA/khHpM3xQeI6oC4H57i6Iq9tIZm3fTwtCrJ/GRG
-ocFkjeEc3KRVASfgkmihEYtxLegm0U7B0IIpNPlYAQKBgQD+dEYb7gdVTsHPPPxW
-w1PgqnkwmGlGLItLrXXazHWlISAUJw0yeeLAXJ3INL5RIIis/EfifxFXohC+UUvK
-LHBmOZo7CSVfobzJL838pm9UajONd771jMCib2tIBMtOvir3vfCraoISTCHgsxI9
-52t5Cuxv3QuDS3LJHzV6HXluyQKBgQDNGaM/ORiKWcKzOjE3dMbgNGtCNCtZhb2B
-OUStsd2n6h9qZmB5lLDnNyUZnYjJEyRoVt3caRLSDa1f2T42ywxvtpZvQlyll6hB
-plPqvSMS4g8CrTXV7oLj+GuwcGWsPTDIOOMx/PfE9bznMNRTIxiXAcCXRm4SWsgR
-zrshBUPYMQKBgQDfxB7Xdvap9Y2IksgSAMnle+UBcDa8CHYjAhLva/hVVBEix8ja
-R8e5hHkY1BE9xM+M2Hra2wXEO/hfdZyh2Xyq2SMhoYRlWhVsE4chFLg40wAs+05K
-IBJThoogll64C5I7taRNCmgCcUMlmDSFjdq8YnfUU95JAvOZnMFpdzwPyQKBgFAj
-T1TZs0wDjH7JIwffGgHqmWoxxiphhk0imkIf+FKuuP2y6Mk4nvKI6ncaxaKDVztp
-jaCccZ0fePm1gYiZR8+ykQ8B5/9PxY95NNrIchbjuye5lAp1+jCnFTTIhgGJmzDw
-gV070Xfk6J2Qx7WARhBiv+SbwVBspjXHB/j7/KlRAoGAPANseZ33ViApVcNM/Y/K
-RzUeTP+sqLtZ/CUrSLFxmE+++BMU1fZ10Xn+al/4Unc7dMZxh5+mgKEmnBjYo1ey
-NfvoxL+aNtccVkueNu1A/UsmGZwMW2ThQfUvwbrJHIT0XJcjdlSnvnqzNwLiejo3
-/G+YoOXeyZHGkIQfo560RK4=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDqJef5e+t2bk4R
+9MBQOA8Lbyo5B7aF1P2HlHCfLs8EW2uvyiccit4LH70kpCeEI6Di0nfOqihqQ87J
+iB1O4Fk6waY3StZmw2pojQzylSSxaYkWKyuMqXa3CmK1tCGWAipCPeIuplOSNxL7
+u940ydWeXLrIG9jyPEywPi+UBwdoFgBHRNaheE2E7etnbR7Ce4qyHq8VDvmQoAjF
+kWLzbeE+dgdw3r5bQhjNlpApf4LsS9gUwoAwePlRvPCaexNRq/rkpIXsx3Ulnpro
+sAdEIwLobW4XcluQJ6Aw+Wrmlz1bkzkMjzgrcRy9xJDQOzvVHsYPFLstWH9dThFU
+d4u/cEUrAgMBAAECggEBALyy79BP3eMD4kb2SEZd50H/xLdRT/drPycUqe6fepa4
+VoDFGeAWyfuNCJGO+Ym0bORfWc5js05wdyZTW8tFYqPHjHzjAwauVgMMKXMWXwvb
+UW9cOyyRJes9o29sS+ToucXIuY9+27rqR5I4RUulnVKSyLScPKp7jhI7C3zwbTej
+eLI2FkiOS1iR3b1ItnE/e3pDovk7WEMDe0AZQ93soIDGq3WzLsRnpExK9MMzfvu/
+7IG4mw7ziWHe/mUx6POb0xoqkRmZ2+LFyZOqnlQsyUhxRbO+3LuM/H17c1av1RNy
+3tYdewBRn5l8vsh6nEm8UimDJQQIZEGuRBsbFPGJlQECgYEA+4MrYKuohpH3TfKj
+Z7CwPUDsyh83FbptuLkKk5ztlSytsGRh5liicYXmxr68oBvTJiEa5mlMxjt16OFA
+E88sbHBYFPueiEUNc+R/g+BRfMCbFS+9agfe/5RgQNWEokWNcmdHVYQShNtujit2
+rBcff49bSwzGsRuy0O1hiGpXz9MCgYEA7lNsA0RZkY9Se+FUcCNyhWM9YrAN78mn
+WPM5AXq3NjZaO+xPYGNM3O1Xhvaz/EFQtzekq1iDLDx4WiQsbV/jBgLF/Q8Kuxrg
+0PcQFjPCfcErOy2jBs8Ks6OKHX4sX6pDkvPIEubmsqnRbvV0RZTGcxAtsiq90LBq
+mSeTGsUCtkkCgYBFY8X28MUZA3JoZfXhKKUm8R+jEAOhkgKtgRfC6/u8OUxeKwO7
+il6e1WN7F6pwvdx+W4nRYeHVmxgHvQVxsam+7SvP9i+hxvNUMwlfN+cjdPwUV0x5
+0VwbxTLdEEt8fZXtp0LN/Bcj4mpY/PLLvcFp7wIv7YFv4YVvEN7kxPofEwKBgBTQ
+vHJcmeYYun+PYqyYq/vyev4PmmgDGNawB56VdUMK5D6vmQ82HRR+tlJXYcj6e25F
+MeC1Tl+iuHBHEIpNAasYuuorUiidZF1b9s+5nZcWNAxrI/4IP0sJUZrZ1k5UtKo1
+GJhuCmA4bM5gKdOZ9us90n/pM8LyZZ0S08pWwrHhAoGAG/FNpaeMZ9g63+39DrcY
+OZKJfOMSREoG0Vux2nr8NwNB3Y9VRoHYJi2jOw010EPjvCiJN9ivnAHX2dhsnJkD
+BhEEVfjwtEsMFrOTfVWUoh3fvS2imxJxmG1gwLmQAdjsgBFw1u91BWjtR77sb8Kv
+OcI/pg4/nHDpMMulGK4WVJU=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client.pem.digest.sha1 b/jstests/libs/client.pem.digest.sha1
index bdc762f36c78e..22f90499415f4 100644
--- a/jstests/libs/client.pem.digest.sha1
+++ b/jstests/libs/client.pem.digest.sha1
@@ -1 +1 @@
-5894EAB5181D5411D4916F70B2CDFEEBA7445EE5
\ No newline at end of file
+27AFCC0D1138C5A765A324057914ACF2435EE7EC
\ No newline at end of file
diff --git a/jstests/libs/client.pem.digest.sha256 b/jstests/libs/client.pem.digest.sha256
index 9de62d73900d3..d206d28deaaeb 100644
--- a/jstests/libs/client.pem.digest.sha256
+++ b/jstests/libs/client.pem.digest.sha256
@@ -1 +1 @@
-67A67B32FC253E12412FFC7D6A3C410F3BECDC10A45ACBC8BF422F790D4E818F
\ No newline at end of file
+6E5D1B2373C5DF89F78D1A1F2DD88A5C10218AA92885E4181CF44C90B08C42E8
\ No newline at end of file
diff --git a/jstests/libs/client_email.pem b/jstests/libs/client_email.pem
index 069a8a3b75767..7fd39cdfccc0e 100644
--- a/jstests/libs/client_email.pem
+++ b/jstests/libs/client_email.pem
@@ -3,53 +3,53 @@
#
# Client certificate containing an email address. Includes authorizations for queryable backup.
-----BEGIN CERTIFICATE-----
-MIIDxzCCAq+gAwIBAgIEVcV3GzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDxzCCAq+gAwIBAgIEVWSODTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBlDELMAkG
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjCBlDELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD
VQQDDAZjbGllbnQxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAbW9uZ29kYi5jb20w
-ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh1x2IV4tVqxh2X/2FDKHE
-ub6bTLnqj0kSVDty/c0egluLcwGxoN8x5Yb+aoRscwh+hHsn00rIPLMEqu/MbKAO
-smKIdDnQXrQ5gbfvqcJbQyIYoFn8RafQXvDdzhTtEt9Pq4sgVGpEFytrQdjzV8eL
-nKAc7B4PubAC++byH67T+BR5iRXcjw1aDIDpIS82/8r3R/lEVdQSvQi5vUy7xhH+
-ANpYJvreNdzG9MKHAYH3pxak//eZyDbUL1xheXaSyNnCbCnnxBnHqdiuSHzmXXgn
-mjC++8gMlu2Bor+7FxGRBjqihACdcbnx7PF1fwiyBNaUJ3JWkve94kozn6TQV0d5
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCreZvMNPhjJuSExgmpRghh
+OlcfXYcUr96FqV1tL9PSXKF9MASRWN2YlGuPwHt/y1IlFauQb9SwHA2XDOODm9dc
+1neOxfL6bcAUzcIW1iVJcKV1HdIFZu3QDAtRu5/deV1kp5ZSrsTgdX4gmuXTv7xb
+/H8JcjLQ6MAMDeoVmoAa1y3g2mX1q8IeCJkZ270QreZDG+Z1u4Vz8bPFhFi2+0NX
+0oR2uYKBIFDU2QSbMS2hp7kkbg+XW4uFI/IKV7Bg7/tcMJI/+3ru4zUJQoDVF4YC
+zEbIITNGc5K1arEOXDkbl+8BTZLpKusjVtdNYWQNo+Zi83B1hUbWpNBD6UOCjbZz
AgMBAAGjQDA+MDwGCysGAQQBgo4pAgEBBC0xKzAPDAZiYWNrdXAMBWFkbWluMBgM
-D3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBABEAJ6ns
-/6f3uILOMtPkrBxR84TFrei1NTFiPhEg1b2fo5JFaOh08WasrA+fmiCoIqbXakS7
-T1one0Ww0IGwUv7A6twKQ6NJW7Edi+T8qisubNeG1YXNCCyETrI3B712P/gkayjf
-/gu6pyTJqkFxQ0g85mg8leWN9TRKaIXYK1n3cnnY1L4Cndyp0JSFNg1gLTvj6yUb
-xYJ/msQH50C3xgLgURGYBlY40JO6Q5N0gKXfmSBWewq3JhQ9BIyMy0HdAcs6mqwP
-+WGKoUrYVo1h5GTB354AMHDkasu+6Rs92ovz3tjtiPsD2L+fFgfmThGSKEJrkZOW
-fsLvGYWqU0PuSlE=
+D3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBADJLBoYC
+mgu4D8P6sHPcyWeb2eIin6U+jalP5Ad8jGM5E6uh+LAL4qjtGiC8i4VqSQNHeJlW
+nVS0ys2fM/bHc5Lz4SN2hMwP8He9ReXywudVw9ILUsukMYr0jRW1wsEXOg/pi1WI
++qVHLhOf72eaurH4e1wnPUiXqUCLcn/uCnQiiEzD+BnuJOI9INNXjdMp9EIkq3hh
+DpGh8PXM8mqFB0scgMFBZ/K4DCWPlfTU3t8jUa8uie9/Ggwg6ljiphO0mNbzJCX4
+drbd9KM7EpoGq3X+3650K4TCz7X6jZtQb+GRIowFqN60HuZaLiUHBR8/mO3yy33V
+1+xwd43H5eJMrpc=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCh1x2IV4tVqxh2
-X/2FDKHEub6bTLnqj0kSVDty/c0egluLcwGxoN8x5Yb+aoRscwh+hHsn00rIPLME
-qu/MbKAOsmKIdDnQXrQ5gbfvqcJbQyIYoFn8RafQXvDdzhTtEt9Pq4sgVGpEFytr
-QdjzV8eLnKAc7B4PubAC++byH67T+BR5iRXcjw1aDIDpIS82/8r3R/lEVdQSvQi5
-vUy7xhH+ANpYJvreNdzG9MKHAYH3pxak//eZyDbUL1xheXaSyNnCbCnnxBnHqdiu
-SHzmXXgnmjC++8gMlu2Bor+7FxGRBjqihACdcbnx7PF1fwiyBNaUJ3JWkve94koz
-n6TQV0d5AgMBAAECggEAQQw6JYOyUBN9uI5qUmC6YFybvMXA8AL5RrGuV1Clcf7J
-Fqp54tAbBW4QrQ9Y/FHb9yX+bgphw0uKVHTz/wEl7+JI8jlsx1BZNcfqixx7Lr0z
-5hwFLv08LucJ/syG4qa0NMxpFex37bg2Tlzf+yar4HRqclWA26cxlHF06JpNZYpr
-3iDbnqCMzMIPs9boKnM+O3nRo1PCl7KrXOgeCTMg5tdCsMR9U2mNOZQAGRPrOwiZ
-/cqh3rC0+Tkwpp6BWUM3Ftxsa3q3AX6WI5pQ6l7+2q1rysZpKmNCshO503mp4Lc+
-+jpaLcgzlgAsVqA5qJ8LBbnajDVeNDkCwmmrR4C4gQKBgQDQLCar3tQnmjZ1UeNk
-STIhGYW/BG5mYxKj5shZqi4zFjH+KiymC2JfyOSHqZYqzYcbssGReWh0CJInskK4
-y1yiImeDWc05pHGsIJAfijeUfPSCwr3ulbr+1pE/+isyhX+fYg/rz3X7lm0HeLxT
-1fGOrjWIvuOJif18T6MeIJ24EQKBgQDHBeV7k8Z34Gegc/JnnrBzxlYOsuHIXejQ
-/0dztkFiFDYOw94RiEP50+bK56Soh6ZF2qaCQklkSqBEtl//Wm6OWpoweTG3rTgU
-leEjp0Y7HlhzStCzDth2QBmxXZT2m/G/jmJoowDuBo+HVbrNa5gXF9yCXqWowIfk
-zZmhuN7A6QKBgBzyVrpFdOjA19u+dEkoqHDT1LY4DoXsNtZVq/xT3rK53l/CS40X
-PimljKmUmk3/YE8oryPkZvLjkjc04XRCyvG8qPopzZC7XhcaPBA2rv3V1kYsgC6h
-4Wu4OGBWEBWpXJK0FxSqN3SxeR212zIpKLq3XLhUGt2wM3BKvprc3DGRAoGAdsEU
-8kz18teq3bnxnVS6Ewr4lKK2SHmIjxSTzP6mOuC2dM74tdtqPCrtnorj3E+8rhfO
-nRDye+5vfTCZTWPnbfev41adjOzF9rqL8VtBc8simgC9UOp9zOloq0Wcuh3I/TT3
-kVoaFu1BPU6xPRuDT9xEDmJtVKk1LRhlIHOQLukCgYEAtzYL+ZFrFrsbvgDUj17W
-DcZXJA5f6XpPb4biMVAS7SuE3H9w9BaTLIIo7p10hOIKuiB7sG3yZ0oJKy+i9QWV
-AvHGeFy4jUGIho4/TEhBclmgPQ0+vCEA+dy6c0oGz5rfQuBdZnQ1qyU28XhLR6/+
-mHIvte5PXur+ki4euVWLino=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCreZvMNPhjJuSE
+xgmpRghhOlcfXYcUr96FqV1tL9PSXKF9MASRWN2YlGuPwHt/y1IlFauQb9SwHA2X
+DOODm9dc1neOxfL6bcAUzcIW1iVJcKV1HdIFZu3QDAtRu5/deV1kp5ZSrsTgdX4g
+muXTv7xb/H8JcjLQ6MAMDeoVmoAa1y3g2mX1q8IeCJkZ270QreZDG+Z1u4Vz8bPF
+hFi2+0NX0oR2uYKBIFDU2QSbMS2hp7kkbg+XW4uFI/IKV7Bg7/tcMJI/+3ru4zUJ
+QoDVF4YCzEbIITNGc5K1arEOXDkbl+8BTZLpKusjVtdNYWQNo+Zi83B1hUbWpNBD
+6UOCjbZzAgMBAAECggEADVVfb/mOdudHTau4hJkVfbnznFTBdDJYszwAL8VQS0M4
+q2tYb5ThZkPkOUFWWRGOSaPtu0V23FrSE7TE3C76CBYFJezuf9qY3QVC2lO8goFn
+3Dt9YT4nr82/85MLU0VkpbcCUVuVo35/WDmzNUvrcZuopStkGCqBBG7wjM1+uJdy
+M18aB3jTCyWSO35AutrsHiJFjD8iY61G0UcXFF/Ue3CWuEdO6yUvoQrzUianTSfU
+gOUCXAeRa1WAQyEbal03xGxXAIu1ZAnMN1IjYQnLGrW9kR+CyBmfNGva6EnfAhAV
+uI+gNXng63f/B1pa2eCTHUjobK6CzTfHrGjLA813QQKBgQDbfvuKugdsprM3IQfs
+qreeP7RuOdvYS/FHO7o2GxdUsfgMoxlozcme3ZHO5iLGHQajLMxszPhD1udLk/E8
+f2Se8BeIcuOuM9dlm5oDI3o4pDqm8+5yNBzyozNKywl5FM8jD5tUKMdqy3NnFc+a
+8M/DxNjImn0Z/w7ll+GltSQPXQKBgQDH/iNOpTGrq/MNhd0/QzdwJgwOnOT4KtTl
+LQ89w1RqqmvXbnMhNoxogpouN5G/VtutcOyf7Npdo3C26Hdnpm+GIr+l43ZYmVQi
+9siBjZZDGJk/CDAlj+gkY6UlwN9x3Qi1ARnkeSZ+3nk3LqiVDpczM84CzWveSZdH
+S/TBvbcQDwKBgC2Q1d09sy7eCFRIeiGqawXiCa3cOwRS95qnDogO5bACZhERhsPX
+/KQLMSq3Yb6o5ejX8vQfNOa4ZTmuU6UQeS35f5km6JcQUgXY4IcIRWYeycJA3147
+7Up9kLdVIPlO1ZVctw0ojgVgiOt8fqWWmjr00WALmi7cYRZOilxhF0RNAoGBAMA+
+pYX1+fdCBRPcuD8Vx/bAPBrX6qo70gay99GxMDaQt7WQ8I7etr3HvZnrxOie//CL
+eoqIzafxcmoAsLcsIExanstCCgNE8MHjY/5VMjoxLS6QGmghG0/PkXLnImN7y+Di
+vBcJ9l1CUlcfPOJ78hBAHipHeQdmykq40wBh89U1AoGAdG0vbfHADzyJwcqQ9EM5
+M1dZAbGTRPkD9Wu5yWRx8JQobLhtmG6ow7QI1XGgpyBLkzvKga0b0JloPCX9fdj5
+gxsBE3is2LRbGqMKYeR3L/9PNR3OHJLTumyFuOIjiz4fVbEoDU7VtLLwI8aXNj4S
+7uohF8n4KqyJFbZWsap+nK8=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_email.pem.digest.sha1 b/jstests/libs/client_email.pem.digest.sha1
index da13d5bcbf589..cb8117946575c 100644
--- a/jstests/libs/client_email.pem.digest.sha1
+++ b/jstests/libs/client_email.pem.digest.sha1
@@ -1 +1 @@
-DE8636084C85D07B6DE32227BF5797F2B078AB10
\ No newline at end of file
+A6976ABCFB735B643160A63B49A08DEF5C45253D
\ No newline at end of file
diff --git a/jstests/libs/client_email.pem.digest.sha256 b/jstests/libs/client_email.pem.digest.sha256
index 33a6d18c1029a..529d387907c03 100644
--- a/jstests/libs/client_email.pem.digest.sha256
+++ b/jstests/libs/client_email.pem.digest.sha256
@@ -1 +1 @@
-449F68F4701C634CF90C2E1ACABB550499B31471EEF89F5439D67FA5F4F2BC80
\ No newline at end of file
+35C5F350766D572190D381F9F6F130E0A5615A141C20AEE6CFB4596FBF2C689D
\ No newline at end of file
diff --git a/jstests/libs/client_escape.pem b/jstests/libs/client_escape.pem
index b262f09b075ac..ae61235397a68 100644
--- a/jstests/libs/client_escape.pem
+++ b/jstests/libs/client_escape.pem
@@ -3,51 +3,51 @@
#
# Client certificate with reserved characters in subject name. Includes authorizations for queryable backup.
-----BEGIN CERTIFICATE-----
-MIIDhzCCAm+gAwIBAgIEDUtBFTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDhzCCAm+gAwIBAgIEIO+6PzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBVMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjBVMQswCQYD
VQQGEwIsKzEMMAoGA1UECAwDIlw8MQswCQYDVQQHDAIgPjELMAkGA1UECgwCOyAx
DzANBgNVBAsMBkVzY2FwZTENMAsGA1UEAwwEVGVzdDCCASIwDQYJKoZIhvcNAQEB
-BQADggEPADCCAQoCggEBALG3OMek73tAvuG8yZEtzu+rg47Y1Y0NQazAnJ1eQdsy
-txLh1+CPgg2PrbDVjGdtcLrBbaOh8ETu9D8QMZFE4ypUSBuwwvCK0avcyVIjGs9P
-A3BRp/aAVTEHyKw+rhhBUsEShUTNMU9xT6X3JzJXf0GhvDXuIRYkDC45/XK9Avvr
-5tUj/9wEBy9dv7dAnbcja2YFhIATJPZuC7hyixs73DzAsth+lHOhSVl1VXfDV4tX
-DZxgMfQhbX7Lgrv845WinSFaKCyeqBm3bOjVNpEc0lg5J3PbttnlYCR8exchvZzG
-waZZoYCaWmcwqWJ9/kBp0liNEzf07Rwcj1vPoOA1FAkCAwEAAaNAMD4wPAYLKwYB
+BQADggEPADCCAQoCggEBAJx4W+j5ckLQkyec4zTBprwDv+6Ba+DSjEmdmKEIA0dz
+Ze7SvkzjXj1PTJnh65M2gF5OTGPhHWE+40GYPJEktehwVnrBxDRRP87S2Sr8ZsbV
+ib0cYjJpW3iF4uWntER3ez4iO5zHpLyPg2ZykmEuE2QMExtUO5cqdEXGuHfkCkgG
+3239bQ4B9K4kQ9ly7n3tIDObBiDFMu/PEgZrmHeyKdHfDrL3HDBEUeF6SL6nO+ws
+MkDVLymVACZeCF3ms6dGGOuq3H/940Ick5SqkWFUm9Ggwvk3M0oiOO5kMvQzJuZa
+Lxq8Gn+XTAfwsYVOhk8cQ/YJgBYpP2qgHdi37lTxXakCAwEAAaNAMD4wPAYLKwYB
BAGCjikCAQEELTErMA8MBmJhY2t1cAwFYWRtaW4wGAwPcmVhZEFueURhdGFiYXNl
-DAVhZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAJ8tGkfZwIR2yp+nmLXJHp/lJorC3
-DH61UhWtnmq7IB7hEx4ufUNB9KOshEO9nsbKBeuZXNbRQ8DTlvrboyv11xZmbFre
-GMquaomxNZ4w3xax9Kkuv7vlFeBQ0WAfup+p09DY/kUxKy65w3eYhe99AzbCQBpd
-nnlk2AkYZz5uwfQQffDkEknG6p1aifVzqRiY5GxaTeXmXRe37VuNfO39vAOGmKXk
-+MVb4uQXwGq6ht2oY7sLS5yplVEmISbHAa1ljnX+X0A2MxhhIIzrJv/TG17sbczc
-wwGQsPSDnqpxg1Kljtq/VFSAubEykZXHmwbsq385oYrXSA+zP4+9QconNg==
+DAVhZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEABv+zGbzRqf1UEeDluN6KUEAl/DwO
+WxnwSlu7M0NVDJtgStElg7tI4QLiLaWCbHDfs3i+vjDy64a7AulJ3hj8KrCL8f9E
+0V7b2+9azc9/fhRheNPEVUfpcFAtr5v9WmWHGaKSo0m9Y+S3Wy+Eyq1SJ44GXzFd
+aSJBqvRCuMACKfXq6ZrBRcvKzrsyQqYIfUjj3Vc3exZvhpIlhfs/IRQsegRB2zEh
+IucMsmSVDFxgL6UwxRCZ0Zcq7akVv3j2Unu08ZqA/8o4kICZM1jJZ8RLQBfFwRka
+9PdibRufrRdX1rY93p4FfV8G8Sm7U3wXJp+yMr8Lmm38gR8BSoYEfmRzBA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxtzjHpO97QL7h
-vMmRLc7vq4OO2NWNDUGswJydXkHbMrcS4dfgj4INj62w1YxnbXC6wW2jofBE7vQ/
-EDGRROMqVEgbsMLwitGr3MlSIxrPTwNwUaf2gFUxB8isPq4YQVLBEoVEzTFPcU+l
-9ycyV39Bobw17iEWJAwuOf1yvQL76+bVI//cBAcvXb+3QJ23I2tmBYSAEyT2bgu4
-cosbO9w8wLLYfpRzoUlZdVV3w1eLVw2cYDH0IW1+y4K7/OOVop0hWigsnqgZt2zo
-1TaRHNJYOSdz27bZ5WAkfHsXIb2cxsGmWaGAmlpnMKliff5AadJYjRM39O0cHI9b
-z6DgNRQJAgMBAAECggEABc71PiE36pFdK8ed1u7mWpMCg8K2iBusz9ajx8jy4hUw
-Qp+7DI3kNElD5gm0wxSioJf922FJlxcwSHdhB1x3CEUk7jLCzFXdMH7Q3axkDe6J
-MDya/sWHA0k719Vcn1JTQBg43tzOBciwT75aO5z8fcqpeiHEVxLqiwA+NmfTFyS6
-vHMm1CbFsYwdunyv6x+jSOaRd1oXKtsfH/KGELAO38v5mbzJSmLu9mwvgIaZ7WMB
-Dv/XQgZJn8yFjXrQhOWPVs7262Q8yx+0gY37fhJbPAYu6lCHi8h9XdoaNKnP/rEL
-BPeVB3CgDKFqSteZncbeWvzk9q8UY9JPD43WIt2mAQKBgQDeAz7/jDsJRaWgVqwA
-mE7YSN8VCXnCJWX8+T/LrRYhE/bYSexIkbWq7FCYpRncm/1fPUbocJsjNIx2mEZG
-4GLkye6d2uGz+vWM27mLNMSztB+c9yVlUKJpDfpPWnM9/GgEDKmj8eNbDVljMO/5
-EyRBauNbaobDG/ojYxMeI/dO6QKBgQDM6/ZMKsiIRcSjfPmsrg/OFvP02dliObEN
-qbpGk2vRpTiYnopJHO8LBb7rlmoVWV4rYhlBpFNrRAObUVLv7LG1bdo+Q/mlE/EC
-FPAKfN7GogSiF24l/twLmoQTgOwW3xxTOzL6kSV9aHxVOL0N2zoTscGgbZMV0Hvz
-wacI/FOoIQKBgQDKDLI86GaiHZyKu9Z2BCddd5RvFNyW8GpNPJnux2uoVv8EAOLl
-eJaZI9CF21waBm0lTCNIT5MhCyX6mML8piHajlx3lUhsAC2RMDdAlZme4oMS22pR
-Nn1YlrwDlBHli90uMkgBSJbdutxrBZKgX+dTEfAwtlZHIyMXKYewmpLsYQKBgQDM
-0Yv71UdZ7WH9hoyG6/MgKhDsSlHcu40b+ukUYYUCpgFLiirtqN3ERpntHwopDInz
-ErnrwjMeo3x3YXFkHVAB5yqb0ZVUSfo5+nNyCB0irA21dXXCxFlrv6UDWXif46CM
-ED+D8k9maWjcRTmw/82soZ7Gmr7Irvk1SfmKIan8AQKBgBB68OsyhPFY2rpI+f5L
-G3aF4S6EJpYtbJvq/UDLCLiXh8mgE1UMTephqD/ZxfM3fN64Ux2Ezh+hKSDUfZEw
-xZU8PnhzFwicaZnGOCt267qO9fcylHIqJ3WMCCQyno9pWcMZDvKrTnZslfGeRWB1
-wKYreYBUywU7jSZVpFm2AQDJ
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCceFvo+XJC0JMn
+nOM0waa8A7/ugWvg0oxJnZihCANHc2Xu0r5M4149T0yZ4euTNoBeTkxj4R1hPuNB
+mDyRJLXocFZ6wcQ0UT/O0tkq/GbG1Ym9HGIyaVt4heLlp7REd3s+Ijucx6S8j4Nm
+cpJhLhNkDBMbVDuXKnRFxrh35ApIBt9t/W0OAfSuJEPZcu597SAzmwYgxTLvzxIG
+a5h3sinR3w6y9xwwRFHheki+pzvsLDJA1S8plQAmXghd5rOnRhjrqtx//eNCHJOU
+qpFhVJvRoML5NzNKIjjuZDL0MybmWi8avBp/l0wH8LGFToZPHEP2CYAWKT9qoB3Y
+t+5U8V2pAgMBAAECggEAdArfPx0gnGTLfelgp9OmsHGnel8Jfqcn28DBXUvwAqGW
+y1QIRkWpspaJHObsKzWUZlxzUu+1NlXJfPBGmu466rPFryzdMl+/jsfYv4SJRJVB
+nbrbKL/yUqQY7nE9xDlIcYA57cAycJWOwf/eh6wAPYam9PG3cBjQOp9yV5gL9/XY
+AT9M+IqVztRgVHxcEYDuEnTJsQtfTH26RW2ylYbeoZsOZxrXVXff3cokm4sGF3aB
+Jr5RdBud03aVBz2ilWyTyDKp/VhQRsOmWK74icTXhIwv9WUx54c5O2VKywHxdouL
+KX6yaa3QrQNqlvjbN1qYs08ev6xIImGiZSbJLly0AQKBgQDLC62F8CMgH6HnFB3n
+cYR0BhlLRCjLDanCdRfKe/P0VvSS/t+f6g0La0wfhbx6K05N0o1r9tp8HCdE48oc
+lTQICoIzORORMJNdMWAgoqREnYPYOgHAUs7SncR1+IjkpfZ3UiJjWkHN7BOJ28L5
+X08gdZULRpKa83K1MQymPSF8YQKBgQDFRxMWlY+07QKA9iTB3yYplD3OISlO6xLB
+l+abomf0a0Dc5qC7agPXNnA35eObKhQOM9cBGtpGx0HHmMXif/IewmhL/KM/uRIm
+apgyYQiGg/aMkEilW4UzcUQ00UsBwlVBkHcEO4SBJhpBP2Kf4D5RICHGE8/9vRfv
+qqIq+H6mSQKBgQCOtvaQXelSeulhclJSiwd+RYshzBagIkpf082VFOqzoyrk5yBn
+Vis1C4XF4kpH1IiFSqj8adXHxkITucglrvmTbU92kXefZXUu27WlOqwbTluNb7gr
+ZgjZIOslwDr3+27xD1n8W0RFaNmS3FR+0u7a8cqA0mnZX6QQlxk8/1q6AQKBgA7Z
+Y7ludouyz15vqKKjLlcw3loWqupSzW2fBm+ukM6YCCDYhz60Iyfe5CGA/1ndl/bd
+thBSOh3bv7rLaBG9ebcRARK/KHaScqhLm7snDKI7aqJ39c/kjKkrnGuxWUj/nLU7
+r4m1BStHd/BzWfQYx/gJSCGFukEqK5QRrvU3ESlpAoGBAKI//xqHWGulMRHPfTY3
+blriUuVxqkL6+uICw+zFLMgtQb+0c6uBFBD5Dewy4Cz6li4mtQ2KlxBfnDI6icU3
+8Ewk2oT6oXUdZl/WXHUaqDMcW87DgknBUldMZBh+7WA4xrvRktYCq3EvsVpIvncs
+YGN+GU8JqkNNlw2fCZOvipiy
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_escape.pem.digest.sha1 b/jstests/libs/client_escape.pem.digest.sha1
index dbcee0140ddfc..161ce10160dcf 100644
--- a/jstests/libs/client_escape.pem.digest.sha1
+++ b/jstests/libs/client_escape.pem.digest.sha1
@@ -1 +1 @@
-F591F020AA95C50F153085D577CE8571F30D8FAC
\ No newline at end of file
+666CD448820851EB1B094B7F9D98B7861AC5F6B7
\ No newline at end of file
diff --git a/jstests/libs/client_escape.pem.digest.sha256 b/jstests/libs/client_escape.pem.digest.sha256
index 2bdb65f770ad1..3e8f06cb4835c 100644
--- a/jstests/libs/client_escape.pem.digest.sha256
+++ b/jstests/libs/client_escape.pem.digest.sha256
@@ -1 +1 @@
-EA837C5070EEAB2FF64789387D7FA660D5639AE6AF262F43CC873A76F189176B
\ No newline at end of file
+95F5C4C32BFDA406D41616E23A07144C6629603E49A5C3A208E1C1CF1C2D3704
\ No newline at end of file
diff --git a/jstests/libs/client_privatekey.pem b/jstests/libs/client_privatekey.pem
index b883de2843b57..6c65b783090ed 100644
--- a/jstests/libs/client_privatekey.pem
+++ b/jstests/libs/client_privatekey.pem
@@ -3,53 +3,53 @@
#
# General purpose client certificate with roles.
-----BEGIN CERTIFICATE-----
-MIID4DCCAsigAwIBAgIEL2OwKzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID4DCCAsigAwIBAgIEE2FVsjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBrTELMAkG
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjCBrTELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxDzANBgNVBAMM
BmNsaWVudDEiMCAGCSqGSIb3DQEJARYTZXhhbXBsZUBtb25nb2RiLmNvbTEbMBkG
A1UEDAwSQSBUZXN0IENlcnRpZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAv6/hbwlJjz8L3KWV/UkhoUTPg37fRp78Wt8xQDNWTn5SfgJRYxwQ
-Qz3zXwZBgqon7tYM5H+CE9SXtmzeQjW8ulgXSjFZNo7Q4C4owps6v7RVcMXgV+7d
-vQvesZsD7m00z9oEpPTs9HbiVHuq7Acv3CjJefGvX74CaDM123soVhCZ3kGr+Nyf
-zW/rkVtcMBjcg/d6g5umy0CQd606a/0QTkKt3bj3KK8hfp8m5R4VTtzW+gXNe1Y7
-+Jo6Owtu8S1m7KK08yXXeYyRHwo7Hy7x1JGh4nNwjtn4AJvnYpijsEBz/k3Lr+4n
-GNSx8lMYAm7RSr3Jx96bXMeaZ6lyqJgiRQIDAQABo0AwPjA8BgsrBgEEAYKOKQIB
+MIIBCgKCAQEAumqDjL1EEZdppEL/9adf4Tspw8Evs35mxVKWdYRTUMc/Zr8lbY6i
+7bhHTyvVjfsCzNGW5AwqrDHVh9IRseSYvmkb2MIl/00Feix5yr8la9BxRsToMT1i
+XRIuixygG2IedqC/FIHYJhkdFU6qsoFDgD1aCy4bnyg7JD/AVCwSsC+SMWRCSs0B
+8OzSEzRjgaLXVezWXZ3w7Phwfv8TsG+tXX1Z17okUuRZSZ4hCwMSpoSn7SU1F/pu
+saQDOJsS9fiLSV9f4jJdZxJ+4b1Yn5DzOfKBeJVqUvm9RxpZK1fTtU/Dv7SLGcCo
+SdpBsUVBz8y7zjmUehtDwJN6yzkYicadGwIDAQABo0AwPjA8BgsrBgEEAYKOKQIB
AQQtMSswDwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UMBWFkbWlu
-MA0GCSqGSIb3DQEBCwUAA4IBAQAEUbXUsfXgXsRtqLEz5h+JUEfn5i5Ac+PrK+zV
-lamEOKTjDnLggkXwK0KktRJAxBxJBI/Flf92LiHgHeGVCb7KNP/EnSkzh0zJkbeE
-W+j9uXhTZt0MjVAVEOkgblp90CJOqfMoXPZ7EU9roz+8VwdZvriYYoiElA/cb/SK
-v+ezjc3oc2kX0ODCOhIoFRsy7TXdq9qQGjUHl0qKv9/9PTiItKkrbS8r3jwlyHUU
-OmuDUu+lOW6nOLkBmolEOtPARp/FuKNssS54IyFscbndGDteqSRZ2LpUeO6bodms
-h8VJvNInhCQBgHB/0+IyRiOR2CNhgMAbg1vTyckhtEAvqwls
+MA0GCSqGSIb3DQEBCwUAA4IBAQA/jyavKT5je2l6rPnh/1nXh80dVrQsYuuy99HE
+QESXVFjEFw+B5sCcaT3zVrgkvgD1F+lOEDYR7MG/uK6xxBJ3F+jJRWA9Z29U8ifw
+Jrcje4d0tKoSQN6tz76LOMRpelRq9dRizPCygwDAdbfJasPHParnneCjMdy9caP2
+eWjdVnuZqGXDbYQB65L2G4hZGqTczJ/aJLhf5obwJkVwCWdsfZntA3b8AHVu54bu
+tG2V9S8l1bHnU7C4DKmxuoAnRaYrQ/c0eT3mOVwD3IpOe8gIkU52T9U8BOKy0kzH
+mCdsHFInfPI4CdqmReX8Rl266SigIZDVhJEV20G8fjDECLRg
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC/r+FvCUmPPwvc
-pZX9SSGhRM+Dft9Gnvxa3zFAM1ZOflJ+AlFjHBBDPfNfBkGCqifu1gzkf4IT1Je2
-bN5CNby6WBdKMVk2jtDgLijCmzq/tFVwxeBX7t29C96xmwPubTTP2gSk9Oz0duJU
-e6rsBy/cKMl58a9fvgJoMzXbeyhWEJneQav43J/Nb+uRW1wwGNyD93qDm6bLQJB3
-rTpr/RBOQq3duPcoryF+nyblHhVO3Nb6Bc17Vjv4mjo7C27xLWbsorTzJdd5jJEf
-CjsfLvHUkaHic3CO2fgAm+dimKOwQHP+Tcuv7icY1LHyUxgCbtFKvcnH3ptcx5pn
-qXKomCJFAgMBAAECggEBAJgKW1cFGNGEAlabCGMEdKZDvAzivxp21FcHnTr8/UH5
-NFk6YW+pPMT5CGagwalwaYgpQ8Sh1n0ALO3HYGtH0FBFuwNgLRD3KnoGYtIo6epQ
-kUyHxzA4CK0AEzzwZafh+ve6R+DHXINzIIY+KQ5ZjP6lY1lT9/SK88HOjLNv9kh8
-NAEUoBtcMW9kDryDt92PAlXoguXNiGzd4JYnFg6iFLKn7tdgczz6dsrTPTIzmOtq
-KrN6G/yFv8dG0MnizcmlLRKY+Zkvt7YcWatvWnpDwZle0ABjIemkpWV0hFjKPnhk
-qOM4j9gw4y+Qga3agj8ZWXtJGX/8cOvbszejQlesPoECgYEA/IM39oU0y+NZOkUA
-0qujGbRk1FgRiOGvZf+xK51fiSGUBjomUp6aLBGx4tPUAfOgoXIA0Vy2x7YRSK32
-ZlgNH9XKSitzsPvZHGvXgCm2hU/NxPXHhsJvNuzGqCri3TP3xUzcH/hTDO2y4Y+O
-NYzIN3PjEQS8ba7t3hTEJBjgW/ECgYEAwlWbkzHwFIyeaLlOqqUi9Mr4C+AH2Gv2
-xE8SNV6Yd/kY02TkyPnJiChuzCHVLOu+HUsWCX4B4Ry6uRH7/N13q1lb94ePFNx0
-GD5CVRs+CU7qXja8Ey67i4jlPl0224VhHOQBKe+HMIFMLkKegp2LIzK6blVmWkbF
-lADCBBgXj5UCgYEAtF6Ssf/Sh7Uc/ldd0B4UAf9uapOB6vGylTxAdLQUEuMuVghh
-aXCrFcGJ/EltTfuViNzjIqmEUkGGNRE2SUKqFUxkE/jXydsL6ZZKt9yT6MPpasBZ
-RrRKNsSI3oTLylAdswxEzH1X7Ys41I/zd+LY/WtFDdoBMqPae2D//fza/jECgYBP
-SD+1OI1axNenn9ohMkflmJzDs31f5QQCqMOj9Fi9sWlYbPQNriJzIxO4wiDHN9IS
-/1wZOUgo/+CJunWUfwHgbQ9hF/PereXLMjM7p2aSw5hIIYpvRQSMc6ga3kqQGoU5
-FSgIZMlBl65tvQ4P+ZgXHw4CD2M3d2IJ9JkytGWAVQKBgQD5F+eiPzXWwmUbBxXH
-leIQ14QMfIkL+2XgqXYRndeZuoIiwEHjjIF9FDLas/gFWlu0lBi2v3oOVIZxQyG6
-jfwbneFu2xNZlD6mQhWPpTU2xqno7T1NEsop6Q8Vt6pPW6SgJ8gaqAlK3/LyX6Ej
-Y7eFhnHjLCwgq3B0lBkcVeg4lQ==
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6aoOMvUQRl2mk
+Qv/1p1/hOynDwS+zfmbFUpZ1hFNQxz9mvyVtjqLtuEdPK9WN+wLM0ZbkDCqsMdWH
+0hGx5Ji+aRvYwiX/TQV6LHnKvyVr0HFGxOgxPWJdEi6LHKAbYh52oL8UgdgmGR0V
+TqqygUOAPVoLLhufKDskP8BULBKwL5IxZEJKzQHw7NITNGOBotdV7NZdnfDs+HB+
+/xOwb61dfVnXuiRS5FlJniELAxKmhKftJTUX+m6xpAM4mxL1+ItJX1/iMl1nEn7h
+vVifkPM58oF4lWpS+b1HGlkrV9O1T8O/tIsZwKhJ2kGxRUHPzLvOOZR6G0PAk3rL
+ORiJxp0bAgMBAAECggEAWX6fTybiEev2DKUZyu6iyjekElWht/N8FUlT8HEpqoMt
+ff9QUauDrsqkeW7a5IHYU0pfvXXV2rtF/yGr2xKa/fbqJoL2yP1KHuBRTDO+HGeC
+qm4H5vPIUFRVQfXpK5xMcbk0Yvz/LzMpUjxlGLOUG3HY07s8CYm/8uQkSoSqKCir
+GmfWjvTkLsB3S0EtnEQY5ewrveTwXlKTMLExTR6bvKt14nK+HM+1Ys7om9u+fQF8
+eajsGk3sTUELWykMg2RhUwlcIZhLyv7EJlxtCB9bMg51uUrfFz0W1DzqBHKGTPxv
+K6oT+nFG1E6GZ8YdBoT/aDYyj04ctFQpKhbTyPbGCQKBgQD287hAo9jCSPBQtqF7
+ZJo4mx+g3keiiDGGVXBME7sC4oDk2fs7spa75sLmEXNN2wMeCD3sQlsiHxJUeFIt
+yP7wHiiRUiHET7+rVWg0QwT22JHMUglfoLbcqoSTp6/XujJ8lkEnLA0yi3qXJGts
+BStfyfX7y6Vfqi3jZzf4Oe9cJQKBgQDBPv+mgAqffcTJMXqYhrrPF9emoJoW8OWX
+6n8Vm0IWnN+NLo9UbDbpiCbhavdBLBIYvwhiycZ2pz2/zbss8+G8XuI55n7vOvUA
+smHr3XNtXGzn9QgF9cCcXinhATTtceTNDENQfoVv/IJzx9xslsrOvXzMVAJeR0pY
+oaewvLgwPwKBgQCkl4wWiR3PYA3Lve2i8EpZsApEjeHw0hUwE4HlMsFSCuUdoqtv
+/ne4hPUhoj/XhEFvos6iyRc0hsEQy6D6IxzHrVIciUE4Nm7pIuNw8bo9S9rg80yM
+D/HQ4VW7k+f/QHqlzv9dvF6PcacjVYLDXC6siU8Jo5F1UVeEWSdUqkh+gQKBgQCK
+6URQZZ49Q+UfUyfzWHYUw+jK6IubjhMcfv5Xg8GgC6hPDncNkrRubua0B3YxLQLd
+MwVOLgkx5cpng/XWvIE6LWKliGEaiuDvXUsyh3+fz16h5uubjSqlvLKSAZIQVVzJ
+YNVKmYhGFdeYbsSucj7bGi8JmiIRr2FsENhAHYixdQKBgFekRxAEJOUy24hZgs0U
+5hj1rs7kHMK0nq4MjaJ/p5t7zXhNTk5RMsevffLoFM9a59uxHiE7n/O5rWT41o/X
+Js4zyPvWfVgRyT+UAPZoO2ABJ+rDzSjH+l2o9l4Y0O/mSZLRBZPrtNEHdYzWcHwb
+7zJx0U9ZDtAE7wejbzA6rObX
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_privatekey.pem.digest.sha1 b/jstests/libs/client_privatekey.pem.digest.sha1
index 1402e8353765a..6af90649598ec 100644
--- a/jstests/libs/client_privatekey.pem.digest.sha1
+++ b/jstests/libs/client_privatekey.pem.digest.sha1
@@ -1 +1 @@
-C678FD6B6A52D0180CE518B4B74C9C92BA941869
\ No newline at end of file
+9FECF1AF0B0E9ACB0FA0AF7FA82842BAC63A6523
\ No newline at end of file
diff --git a/jstests/libs/client_privatekey.pem.digest.sha256 b/jstests/libs/client_privatekey.pem.digest.sha256
index 85a1cfdfcf854..29be0553086ef 100644
--- a/jstests/libs/client_privatekey.pem.digest.sha256
+++ b/jstests/libs/client_privatekey.pem.digest.sha256
@@ -1 +1 @@
-4F5C946F3C702A7675005C921D438A43D66DA45F540AF6945971001273604544
\ No newline at end of file
+9A64EA9620FC26C9D5D55A00A6A01C6AF62CE4A345DBE172750F13EEF10FDC1E
\ No newline at end of file
diff --git a/jstests/libs/client_revoked.pem b/jstests/libs/client_revoked.pem
index 7397be718381c..422a9918331c7 100644
--- a/jstests/libs/client_revoked.pem
+++ b/jstests/libs/client_revoked.pem
@@ -6,49 +6,49 @@
MIIDsTCCApmgAwIBAgIBBDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRl
-c3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB0MQswCQYDVQQG
+c3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjB0MQswCQYDVQQG
EwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkx
EDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOY2xp
-ZW50X3Jldm9rZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXLYp+
-lWykS2LLUxAp48lEHXPYpE6rlyJqr2Sk2lji8Tneo3AHPtpx+8pPWJP+sqtM4ZIu
-42ag7AgORu9+J4xyWZ7mDAMS5NHOpVfbg3xDuTAC6dHDQq4YosjjFR4x8Ma67PKM
-dCDuMXlbp51EwNVnBnOPKXFdIbVF7yITHju4jv1f6y4bhO5TyhRqgtovtEOR93oJ
-v1m/wGAx4pmvr95alwKtRAZiti6nXE/CFLMjkCATljrLu8RDc3BndHSuCP57X5nn
-FpYoMRwe7r09vTKYuelYs1uLiDbzqC/fktfvzprme/rBY+f8IiVyl0bRTF9zhQzT
-ctPm1+hrljvd+8vbAgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
-A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRerq6d7jrJ8x0Gg5ESvbLX18Zh
-3TANBgkqhkiG9w0BAQsFAAOCAQEAiZqiDWRHBjZQvwsKVLrs8Al9XoInB1aYjG2C
-dlja6mwaGikNYKCgotKoIChBc8EJ1X2fEuFa8n1bGOamDCVJCu0utQLe+EEok86d
-Y5uvUp6qt91ytBOlZk8bg+kDgP9Cd7F92CkNnaxW01/sHu115SUFM0VH7olBvdOR
-ouxg76qvL2gaEC4amQAdgYpJ3/A1esg+CcCiCFyTK3nAdd6BHEad/KH1umoHic2X
-S4eyd8D0GPRrRfpPQSiC3Y4pWvL1L40UbkzAx4bqSQMvY5hWsXmQiMQDpQN3sOOc
-31Us59mICifxAtd5OpN1oZkIRn46wx4okqXofh+bttkNyWDunA==
+ZW50X3Jldm9rZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKyLxu
+r1YYp1NEuM789WhvGN1gYSi9gxIYensRvO2U3gSpLG0BGhhKwXeB+ej71B5hM150
+wpsKSsFuEgNvImywmKJJESKmHrRxpPco1/Vjl+3fEONhmT56C2UX6SjcgTDZDaL8
+xyL83tJtH8/FCuguggW9TEVHEULjykQgnyeZFG4VNSy4RPUkIh4HTPkdZC9dERcn
+zxJv7zVJvnQJt/E8OJmNkRmD/YYNTPKWzjJhCKpDCtpxyh6RHz2PdYeXLvqNGgKJ
+uLB3+bAkS1IMIDwtSodOKv0ld1NbiaOUAgdTekUJskyu01DUqlK52b+wew82egX8
+52RNPwW31nAXAfHjAgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
+A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRqXfMofLL++h8DYfshKWI5y9xn
+wjANBgkqhkiG9w0BAQsFAAOCAQEAL7arHIv5jRUX5Xdp7iWmvg5iY50iaDhDTMw3
+fCzwBKzluyJj3nDBNuVLaMD647pvpmszSHKeo/gP6yf0GmDoke8fpmdQN2P+9JxQ
+ETylOyWKQXJfkhLMdIzUHKIVZLkApsQ+NQO9yI5BbGUneZYL8DiS4qYCQTwfnH2p
+04RNK+jDkbGvDEDZUa6qJv+pujPG6AjT4gefaPwXyk+1vz+an6ZmUt9gTQYSoPMi
+ziB6GKZESjpMxUyloxRzS3AWFQ9MDI81+ip6ENJ7QOMhobdWpU4Z/gMe0LB0p1PT
+jay6qHsD6sM7aPHIbq2vRcl2qV00iqqxw+z7fO9VlF73PvWSJQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDXLYp+lWykS2LL
-UxAp48lEHXPYpE6rlyJqr2Sk2lji8Tneo3AHPtpx+8pPWJP+sqtM4ZIu42ag7AgO
-Ru9+J4xyWZ7mDAMS5NHOpVfbg3xDuTAC6dHDQq4YosjjFR4x8Ma67PKMdCDuMXlb
-p51EwNVnBnOPKXFdIbVF7yITHju4jv1f6y4bhO5TyhRqgtovtEOR93oJv1m/wGAx
-4pmvr95alwKtRAZiti6nXE/CFLMjkCATljrLu8RDc3BndHSuCP57X5nnFpYoMRwe
-7r09vTKYuelYs1uLiDbzqC/fktfvzprme/rBY+f8IiVyl0bRTF9zhQzTctPm1+hr
-ljvd+8vbAgMBAAECggEBAKZBtMmbkKbc7IenNUz4iFEN5K2WPobMLrZ5sGzDnHw0
-4H7Kj0u537TEVIu8KBVaXYr4myeJYomh9ZN6cZ9q1VShNGoRC+r0S7u7+0dLr57w
-Hk2R9hZFlhjI5ii2726s2BAj9kBZlfwU0+zidFxkFj8VOAJ8he/slJBldVGKQaCV
-yAYykH+XAYlUnaG8TYOLsqJjB2yROLjDKX/55D6aHMWeYOihEgh/51O1sYcw5iiP
-Fv/BWuZu0K3wP1FyzRjPm7rZfZs7Jgx/8CbBR2jPetvm2OibuyvHQr2guT2uYbcl
-e5PRj8CcfiqwJSlvPoXfi3YKItU+rTnUfycYENBY/iECgYEA8kaqJWK0kb1TSnrl
-vOdUUELZr9LYhH3yp/+/dJC587hA0PNlhH/q6RtKWy4Q51qc0VjYO8gXmlaFyR9i
-Gl0Yw5rq4yuW6S/47S4jYs3hdWSztzg3jTOvbXoN9D8sBgR96YuEigbHFkKVOMzX
-ESj8HS1lOjmnq3S0sS/UpfmFKysCgYEA413qhjnpNEce7D3QXyxKOHgX+Mx5IVqY
-x/VfRSFlRi/sPyDd6ZqCbnv2geM6jnK9wDaOj6+k4ERLXNMGJqE3tpwt1QROOG9Y
-LUn6VYR0lNZi0fzJtCXGKNq30aOb2eTt0m1wYePRyGGTmTouUhMJZeSF6S6h97uP
-suAC4k48yhECgYBNlKdDVXow+QlE3lnWxdTP5rhCfyfqNVKQzWqHbxi8nJHU1zv4
-0+VrQ9vpmrS7AN8agnKrogU83Nv2bWBCxPD7Ig55NCoc/cmeWZnnN7osl1SdJRlU
-0+onCmCRh/EW9DVFpfGJKWZzEFssVxu/WPpydWjm3jN8yeBapNBZXa9xBQKBgQCT
-hWoXcReofcTIj0rVlY0KmpEjMrgdNgUPKFuKJSLqqUllpFOXsFKUkQXePKrPpg8L
-2dohzFrz0Bv9vEWvZscZSrhKECMYZtYVfHxaZDioIXm4uYW9xwyNkWvXL4p9lBXi
-hHN4cDK7CuSTdLZ9NjDQ1bc1FrasYBvaeZIld5HloQKBgQCmb5LTVXvcjHRKFDsC
-9dmkaQl6ttOnbhvn+CjdfwgfiPdOLvHAp2Q3bad0xbMc8Cc7PYxbfEPuYIeLHxB5
-fc7WeRvwVLeS9F3ClkYVSq/D9ZU/U5tG/VsrxylUHcVAA1hK2aHaWN1zcJd7Azaf
-hhu9hM1a/hdaN2MKcOBSYFob1A==
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKyLxur1YYp1NE
+uM789WhvGN1gYSi9gxIYensRvO2U3gSpLG0BGhhKwXeB+ej71B5hM150wpsKSsFu
+EgNvImywmKJJESKmHrRxpPco1/Vjl+3fEONhmT56C2UX6SjcgTDZDaL8xyL83tJt
+H8/FCuguggW9TEVHEULjykQgnyeZFG4VNSy4RPUkIh4HTPkdZC9dERcnzxJv7zVJ
+vnQJt/E8OJmNkRmD/YYNTPKWzjJhCKpDCtpxyh6RHz2PdYeXLvqNGgKJuLB3+bAk
+S1IMIDwtSodOKv0ld1NbiaOUAgdTekUJskyu01DUqlK52b+wew82egX852RNPwW3
+1nAXAfHjAgMBAAECggEBAKwBlYQ1icwjskfqkviSmWETMEReZZZKpYDJ87ZaERRj
+NoThQBzdK5nNfTds6ikiBhrg3dAgei5CtsxMz0jnZFZvQcjEliVeiiSTj2q/tFAk
+QQsjYhwZRptzKu8bQSO5Gdwi6wuLRqxDvS3++9fhpow8ke290k2z4I8jtKIPHiGD
+ZAC71sg93DYRbEiBGqzQSTc53LGPH61RhntaFq8mQpNF5naRBxDQkxrWY3/eKsEo
+pjDDdD1pm3IaXUZ73ul352z6istyslYtkushWfFk5fxmBhWiT9o9AJprdqwqUhhs
+u+6ab/Vov7/bN9uo2q16x03EOyQz+9bygvmDFzfX/RECgYEA/hUjax+GihYzX6kT
+mRPwx7rmofNBYbfIL4t6p/WBcFfI2wFHXqk09kfl1M5v5DDg8TTmjQAU7iP+Njmk
+Ki7wL0E3tK6jaJdK8JTwUclb5uFW3M9tb9h9zuuy8l0jtn7HfDaetICWrGRCWBi/
+x/SG1MZt9y462jZRx6Uaa8TuQSUCgYEAzFB+jOd6yPHHRqJ3Sl2QXvy8IM4LNi8B
+dF+IzO4IKvQJWtEcESWEc66zpvy4oLvcDdtpxWd50NDLYDXq1YfltUVDDyus7d7l
+bxYg3rRMXOFHODJsEiW5xzuIeTL1bFRP9NfydAfrN9HBJSiLb9pu8UvX9dO3Tljq
+tcLC3bA4DGcCgYEApnDMcdkF9iES6EBGwUlJulzZxg8mrk+IyHRzFeGCOEiZH7XX
+vc/UDN0OVngg3feS8w83U/hQvatAVN8vhh6XYi0zw51/F/27rpuyTbE9DaJhl2Ye
+B11nFIxb/d47jcnA/cJ99joh+a33s+QKhX7OcEXINVrIXLemnMSv6RbpCC0CgYB+
+yyluCi1hzOHNTP/Yz63LtO6PMS82Bf/SF4OPId1BwsaXbJQNdAn5vJ5S8B0n6s2K
+b/L5BdlMJHdCEtPUhgyg7QXLwWAFEsGxqbrmWl/VeBy5nFkC/hSHtsNf66bw/nXM
+TcNyDIQF1Q8XH01BMf6NknLIZQYqAfZOrDpAV57A2wKBgCsnOWwoIdPHaoBUoFhP
+aBmFqFx1x3aUWAhXAAZaB6/Z4RPh0oz1WRidzI3QCckgABjS1fE9WvrDMKc94Zir
+Cm8Yf4XuwrYaV4SOB0IkKuC6Itr1E4W1h2j5myRoxRSSszyaMpcJJHkDY9KNVawV
+CyGikkG42/FFg8tWWvjp7o4y
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_revoked.pem.digest.sha1 b/jstests/libs/client_revoked.pem.digest.sha1
index 954c131885578..93d6896fd9765 100644
--- a/jstests/libs/client_revoked.pem.digest.sha1
+++ b/jstests/libs/client_revoked.pem.digest.sha1
@@ -1 +1 @@
-B0AD6EF051052F1F5E25AD400848CAAEC237AC02
\ No newline at end of file
+C8884AD029E9D121BBD51CA0DB4ACA122604A282
\ No newline at end of file
diff --git a/jstests/libs/client_revoked.pem.digest.sha256 b/jstests/libs/client_revoked.pem.digest.sha256
index 4503fa28a3689..4217110212832 100644
--- a/jstests/libs/client_revoked.pem.digest.sha256
+++ b/jstests/libs/client_revoked.pem.digest.sha256
@@ -1 +1 @@
-619462638B204A36B830C672AD7596B15DE6653B731292C81F927A88A9AA6C99
\ No newline at end of file
+2D2702C05BA308219BE37E5F2CCA1625E5538F51639C7900657F6C4B6E9017B2
\ No newline at end of file
diff --git a/jstests/libs/client_roles.pem b/jstests/libs/client_roles.pem
index 085e7ae2da74e..93b01f9e9c9af 100644
--- a/jstests/libs/client_roles.pem
+++ b/jstests/libs/client_roles.pem
@@ -3,52 +3,52 @@
#
# General purpose client certificate with roles.
-----BEGIN CERTIFICATE-----
-MIIDtjCCAp6gAwIBAgIENm8bejANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDtjCCAp6gAwIBAgIEZVtXuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjCBgzELMAkG
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjCBgzELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMRUwEwYDVQQLDAxLZXJuZWwgVXNlcnMxIDAe
BgNVBAMMF0tlcm5lbCBDbGllbnQgUGVlciBSb2xlMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAyPzl0H1+eLYJ92HXheslqxl9Ph+uI6VL+4fb0ES6ss8W
-v5zUO0s5jXIbFzFnGffSfzvy2OAAm02dOVyU1pVRaPLzj1sOg3gWuwaNyHooBug2
-Cn1QhIglc9+tsik7KWFkSS9LkKT8W9U0Q2bJXeux/UdqtPbggxHpyW5Z9PGGht4/
-PLVtWCNgPpIcH98hh/MXOtWY9yB6cFHqWfRza1p5DPUju5FqL9HLBPCWDUrM9s0I
-5p2+Sr1/jMrmSQ8TYAczQiniDzXFJUozSKsNQbICp+Qkmg7rQgkrOrKU8E/7KMlE
-CmdzuW8r0XCCmRcurQcSOVMqy4NWmjlGzg7wQi3iQwIDAQABo0AwPjA8BgsrBgEE
+AAOCAQ8AMIIBCgKCAQEAx1P0sz0dH97THiMIro1zs4ybwBrdUdJECY21BKeQwCom
+mOq0sVADddyGByzDKwXfkSd7E0RJ5DrLxm/TmIJqD0/+oisIdCff8B2WAIo9U/D4
+vqPhl1Kx1Dcv6814sKPhxCI83Vbd6HHrbfeQqHMNyx4YA81SVG6cVzdUTy9aH7V5
+lgGbYnwrQzAh6s6oC7BSU6XelsXL0eGzI5z0aMLGbuSgw2ogqGwH7iG4qRSbuUGy
+G73gFAR46b5FsGEHe2q/NWzDCmJz/DETM2g7DjSCtrraAAyeshp7PtOxJes2G6pb
+RV7WxzZ1Y1UyX/B528/uBW7wCoe6OHQYOEjZIYkGawIDAQABo0AwPjA8BgsrBgEE
AYKOKQIBAQQtMSswDwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UM
-BWFkbWluMA0GCSqGSIb3DQEBCwUAA4IBAQC/a2dpkqo0jHBb1roMaVzw2f65cPHr
-I7MMC78+080GFqdLhc2h1o9UvvTS703NLVd/eu6VmSdrsLpbuun7/vXcONvMK38U
-N38RhkC0tZCbm5BNBf/8QMuHbW1ZjdPbbpMaIGZP5k4W5YYsRUnk9tfZ9y3EkPaf
-7poApuxDyzci0oPiPl1TK5KS6H4X3z9/l+9Mpv+UdG+/ckJGEoBf7B+s7CNv1nc4
-meLqea0iTA7+bdQLDpbQ1f6HSx64Bqb0w32hc/Y8541eaf/Z/wMjJ40iX+0MSKcW
-cobRQr9pJ4tBvW8p0iqhZjN1EYXnxaAulBw7B/6hsY/eDmavg49qh3g0
+BWFkbWluMA0GCSqGSIb3DQEBCwUAA4IBAQAnq7yMpYjQx91xDGdriHz0XyvH+UhJ
+Wi1vv9xLjdXuO2baYMTiPxTdFM+Fy3CEyXj/4bWyeCjPYg3+xBFstiAydPdUd6tz
+4rzX/UbELFv6i5LFLIchUPZg0EJx5/+3M9On19ZX0LzyevpJENJ+AzDkCqpkkBhR
+I/JlGDukTnlsI5CZk2nCzKX44QzefX76JaMeBI8QudEwFjHA2T6ZbdwyJ+TGzH8T
+yjq8cRZUH6ex9Z1ENHYYFn5uXPJYVP+135LWz+wy9Mj/4W1fxr9STJxtg524zNmn
+nuCMhFh9rKMNBe6L9jAKGnr7AoiJ3ME2mIiH8mWlU3qIDq3/TttAPT35
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDI/OXQfX54tgn3
-YdeF6yWrGX0+H64jpUv7h9vQRLqyzxa/nNQ7SzmNchsXMWcZ99J/O/LY4ACbTZ05
-XJTWlVFo8vOPWw6DeBa7Bo3IeigG6DYKfVCEiCVz362yKTspYWRJL0uQpPxb1TRD
-Zsld67H9R2q09uCDEenJbln08YaG3j88tW1YI2A+khwf3yGH8xc61Zj3IHpwUepZ
-9HNrWnkM9SO7kWov0csE8JYNSsz2zQjmnb5KvX+MyuZJDxNgBzNCKeIPNcUlSjNI
-qw1BsgKn5CSaDutCCSs6spTwT/soyUQKZ3O5byvRcIKZFy6tBxI5UyrLg1aaOUbO
-DvBCLeJDAgMBAAECggEALAIkGAjCfo/tkrtbw7j/YH2OTOIG8UsqXgMbgHjsIGbz
-5jTiy7DnAy+u+t8YYjk/YBiyTgaOW3MdMAgzluDPGJnJ9uPLQ8Ixx/XhabpFtW13
-F8jkroBZIwwhLleU2rS2jsfKVfuqPjlq3i9NfEE1ke3nUYSVGF39wNtm2xlfiXwR
-IAA7y9fEegEA26sRErlNWZF6Lya1muK5LqqxbKcknpM9em7pW96GBToGqxy5DEGr
-oOnQ0+FEkYVTIu3Lzb13k3KjeVh7CllbBa8O1sYgnZxwdqkjilrTm2miUqGKyT3Z
-zuHNWF4UwGgA0tt2M0mjHegMfBJrobP1lPDjLJLsQQKBgQDpfXzlRw/yVXTYCxYF
-IhZvt0uemW5ZeiYIwtWPlB8F7eleXS+rldk6G9ZQaVxzTEN8rTGk9pOV/TQdGBuh
-Wvwj4z5rBnE7NUc4JioE0/9TwR/iJ97nxIZBw7ugYVF6nT+CEjWxD5EK2CRjCHtH
-DmSs6+8i2nIJcfFRuUNhSeMT8QKBgQDcXUGrR+xIhcrCe0IfU80rDGP72nNtLhxl
-JruOivmiVIgvKf9YJt8Fg/NY/nXt5ZLu+X6fBmqgea8JRFgMYJLwhwUH233Hk6qK
-+crYKD+gvcmudrnW+KuWU8xb2Igp7+SdUEs7Qc0N10gIrdNLy07TxiUV4+K+uQo1
-gJMKSaO9cwKBgH+ezF5ReiDdnrDbVlzV5eeICzgT6uHTRuiw/aOttUs7X7vpOT4S
-+JmvT3PCMvr+K0EBNUpSUBbCP/LLE4cdQQWolpxO+CMBmvzQt1TOPxebTn8Bhe+u
-VieqAWrG5o8bVfQVWe1+cau4uiK7Jw6Oim6NwraNKZmKAvnFRlPt28UxAoGAHh/F
-fWOsolFf4Ww2ItAy6OKdvG0lgFRgX1eHJjpLW+yQXVoxZNyTek6Kcqz+dBIBHxgO
-PjEDXIKl8e9c1fxRuf3LK0LPE/xUwQDawOfnotKvAmGKkB+YXUmBU0DrhEgeCX9T
-eXtKS80VNNKGYwMc+IhKw45OjeH/ykaUU0Cz2I8CgYEAwCLQxklGrlsTPehgC/CE
-0ZKYRGACqF3T+jWIsUP8BUABHxQsIik3mXAu7tcqRcD+E8D0Ctlqc70xFanSEmYa
-NN4gxX8rFBwKTmgQb7+25r4G7koGNqIIx2cGJVVF3RVokPTdbX7NEER81wQlZ4uK
-RbnNju9uqfxBtl7itm+pGmM=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDHU/SzPR0f3tMe
+IwiujXOzjJvAGt1R0kQJjbUEp5DAKiaY6rSxUAN13IYHLMMrBd+RJ3sTREnkOsvG
+b9OYgmoPT/6iKwh0J9/wHZYAij1T8Pi+o+GXUrHUNy/rzXiwo+HEIjzdVt3ocett
+95Cocw3LHhgDzVJUbpxXN1RPL1oftXmWAZtifCtDMCHqzqgLsFJTpd6WxcvR4bMj
+nPRowsZu5KDDaiCobAfuIbipFJu5QbIbveAUBHjpvkWwYQd7ar81bMMKYnP8MRMz
+aDsONIK2utoADJ6yGns+07El6zYbqltFXtbHNnVjVTJf8Hnbz+4FbvAKh7o4dBg4
+SNkhiQZrAgMBAAECggEAT4mBwC/nujro1UK3iSdqfr/humM94xNbENDZ5ZSTnwpy
+wlwNZB+AcfxgAubqrfU3A63UX5jFP2vyuikPLvEDLz8FZWJ0ih0LL74uh+KhnCgw
+qamtL/vQd9TZq2ce/KU4RlI/JTcvbzxd7FB4Ca93VpCYlpPMgWJtwiOS4dIMTVAh
+eK9AiD+6DK6kYXEbdtTE/qB3rzBhI81mkmVCONMC1DzVPLJaobZImQOpjG3wwqve
+pPe+aEuFLvu0J4z+r9xVub7D/swh0KnnOhGgKZH59KMw++YWGyC1PjHc7YVE+url
+sIMQ1ZGTu5nd7Zy/NnVSxrjcaH+8iUBxDb5AxkATGQKBgQDl9TWwzwe5+CMoFdHd
+bTnL4AKOwxTU7f+CYL8uC4yFhzDpwfMdCv0iAsyxcKh0qec24c3OLsaohInOEoGq
+/D99vOLL/8YzUQzcOrkQ3GAY/bY7z5OLG2NrvWiLeALhYu8ZhjrK4qMInHltWgxU
+vfBeIBZSjzeb0KoZitdr1b9npwKBgQDd5r5qLLQtgg9OHfZAzkqNqZyS4JBGVBzT
+IOdryu2Zyhy9WBCnay9/5GsY+i/lx2fcOGWlQOE7YK7zGzGUTpWe3CjqmoCF+A8K
+os3cn0/Z1WVSAhStamuZ29JaxdIku41PR/9kVT8n2TymLpkpINgPqlb9JiUGoxpG
+HjZ7AYSDnQKBgElDpqd+TmfhttG+oe56Lj/WTvpNDQYKDa3sRPzWkR65w8u2D07h
+gWSkn7KowgLpXtENgBSAsqpeD8mn+8gONexJkbiM9QjEeeRwkSXeEH/l1XmY2Nkp
+ELPy1KwawFAuxR7MtU6OhoLn14gPeH5HRZ23e2UyW/U4tfkNUzT4FNNhAoGAVhws
+2FbwtJg8CDxoS03CcKbZM8YhNql9ZhDmvVXKTlu4O2HpVI6rqB2j0dWEsM5o53Sb
+PT2oKbqjKYPsAXldZoBZAHFkXpicfawf5vWLryS4ZNKZ45Hgn5xGTSNnIQoHFDUE
+TpMmJviOKI78sJEmRy37A9HBnIm+sGcZDTLRkaECgYAOWKr0DpV1IZpUneen98mc
+XBkn4kz91d+RGOr7InZGB9h4wM8zlu2BI5WChK6YmNm81PJ10nCgce5yzcRgy4qd
+DqDWLiQxMFhA3G+ioHfC/DSa5DR5T/e45LvNiljUC19VqyIvlFWVTBMMn7YwI4zR
+BKNegkZ1VyooNu4FBkyoTQ==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_roles.pem.digest.sha1 b/jstests/libs/client_roles.pem.digest.sha1
index 8a0cfa649e467..5ba43611afc58 100644
--- a/jstests/libs/client_roles.pem.digest.sha1
+++ b/jstests/libs/client_roles.pem.digest.sha1
@@ -1 +1 @@
-065A489D101C23DFA6C30BD1956D743DEDA4A422
\ No newline at end of file
+1367B5639EE86E2CB4FF2DF5EBCC02A5B3A9CD2E
\ No newline at end of file
diff --git a/jstests/libs/client_roles.pem.digest.sha256 b/jstests/libs/client_roles.pem.digest.sha256
index e751586dd0078..7501be120029d 100644
--- a/jstests/libs/client_roles.pem.digest.sha256
+++ b/jstests/libs/client_roles.pem.digest.sha256
@@ -1 +1 @@
-AD26FEAF7FFA89165CEE4CCCEB4C2049B7DA4B8A39C65316A798B2DB5BCCB016
\ No newline at end of file
+6B8325B29E3C061BD6EA615A2779BC4A4D9FB56630C4F526A29CE0ACBC6191A5
\ No newline at end of file
diff --git a/jstests/libs/client_title.pem b/jstests/libs/client_title.pem
index 6ea6036c4b8e7..32243b069e5d6 100644
--- a/jstests/libs/client_title.pem
+++ b/jstests/libs/client_title.pem
@@ -3,53 +3,53 @@
#
# General purpose client certificate with roles.
-----BEGIN CERTIFICATE-----
-MIID5DCCAsygAwIBAgIEbhN+OTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID5DCCAsygAwIBAgIELjth/DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjCBsTELMAkG
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjCBsTELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD
VQQDDAZjbGllbnQxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAbW9uZ29kYi5jb20x
GzAZBgNVBAwMEkEgVGVzdCBDZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAM63JWBNHspk/W4Zkebwc4VXcnmqN6stOuwQfjEBjnixL+D9
-osIoE/FU6x/2TwHEOOYVW1FhP+rN/OQRcqY0T8z5Dk4zQACMZZQdNNMbBNmk1XIH
-6pFThn+olAsq7KbeNsQEtlF9EWIRxrpc+C7Oj69o+3dCJbhRNdj64pw7BrPfOo95
-nM8G0nkCJyKQkpWouDinSssNaeYFujtQWHQ6aTn/Sj+6ta0iwzm8Js/QD9rG6SOd
-2owC62yuK0eUrfkf6xADWe11decK9Y4oEZdbtFVZfHm494ovPpaEe++5kUSRp008
-rDvJBqmIZX9dnd5IGP7DU6PtSEB897Xmk/oiBEUCAwEAAaNAMD4wPAYLKwYBBAGC
+ggEPADCCAQoCggEBAMu3CZSHz1GkrDsDeMC+2e7eTI4VncmjwPV7mTY93rRC/n1J
+YEAqRyaX5u3yLNHb7rVVC7nQ/r0MpzMRThKl264YsVGtlrBDWiujyLk9Gim4aZqM
+RKFzHEptyUVoXSNC+yq6Ve65Fxq6gBZcUaOJHFOSw0fYnncW7eeFrvHR+/ehUnyx
+RUOGfxc4pbNthUqcE8PO/VS2iV9XtCUhZXoWccMxNCMKmK6+NRLLrzhIVCENwyVq
+Geeyht5WnjlZF0XKuJi0m/ZLXJRtg3EjXxHiWYgPdKght3RCOt2raqV126ZzTcsC
+k4Bjqc7fzd3dW7GQKuMt3zmNxFXi7opCBsDXPa8CAwEAAaNAMD4wPAYLKwYBBAGC
jikCAQEELTErMA8MBmJhY2t1cAwFYWRtaW4wGAwPcmVhZEFueURhdGFiYXNlDAVh
-ZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAcd974FGDy5YioMZGZMV0N1D5Nrima43E
-uX0NhhYWuThs4NnKcklp5bH4Q6FvqkbARlALDUIf46AWGET2Zg98fSRUeFNf0QEu
-OnoxPCjlm3f/YYbCsuEzcNpN0a26dEWAVU6jPc9Vz8zJOwmGOiH+x6WLhDPFzgn+
-5Btzo9672tGb+a8ZInKxaZNvpCZ+6zggWX9SxiI1ZG0MZTtHdiXopneKlB/8s+sh
-K7qOOx98pXICEB3VFd41chlU22UkfAuVe4ql/iLr84lyF4njbbp7NLAJuAoC4Scg
-L7jq3qaDOu7khniW+9TRLumpH98EDBO817G1y5BdcNuWs3iIxZiLKQ==
+ZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAbLZVPVdApKR2ZHkzTD0wcl6dR7T55q9e
+i4ibhHj2jbx55ctl8btEkUezcxdEtkk4t6TTXawfqG7NrQ2XEtueaOi4HbV6baD8
+wj/SecnQGdD+TLAB/PvYxL6jc8pisLCrghUzlIAdjeTdI/dXdRu18WKJ2ZI03alu
+qTKIalDdmUfbCyGpHI8sjs32x4rPyMVujUClHwM49SIHyROisaKL9AqEZYo7mUi3
+rYeR7QZXjf9cxFJFNfXSYKJGLdh22xGXZKtX+s404fMyemd2YGjbk0KvcWIHtkqa
+p2Z/KxJUDt+xWsyZTpUZPToLYwnc8/6h6/WgQTQr45Q2CYZsb2hRmQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOtyVgTR7KZP1u
-GZHm8HOFV3J5qjerLTrsEH4xAY54sS/g/aLCKBPxVOsf9k8BxDjmFVtRYT/qzfzk
-EXKmNE/M+Q5OM0AAjGWUHTTTGwTZpNVyB+qRU4Z/qJQLKuym3jbEBLZRfRFiEca6
-XPguzo+vaPt3QiW4UTXY+uKcOwaz3zqPeZzPBtJ5AicikJKVqLg4p0rLDWnmBbo7
-UFh0Omk5/0o/urWtIsM5vCbP0A/axukjndqMAutsritHlK35H+sQA1ntdXXnCvWO
-KBGXW7RVWXx5uPeKLz6WhHvvuZFEkadNPKw7yQapiGV/XZ3eSBj+w1Oj7UhAfPe1
-5pP6IgRFAgMBAAECggEAIVlguUmX1xU14aZUaIqQSInUGRbCBma0o9MBWMykfIox
-bD3fHS20EfIeQLjPBDzw5QW7BsUGt83lp7G86l10JmBj02/nOm9sD9oqZIhuXuJ9
-4Piv+iQchcnfoHGbXkxpT3RnKXxg6o5tnjFThNGkUqtALnxIc5T+d/P5zTSkny3x
-BnR/cHHOo+FPA9X4wUjdpKKH6WuRTmb+rPUTlrY7n5D9OF0JtcTMhPyAYGbe3H1I
-iRVFie3+GIilMmFnUaN6kuL2iLMNRNR+OIKjabQuLrLS2x7B/1SaBya7qGnnU/d1
-P+akrGJeX5QBhXBupgzQGF5Ef0QbDoxrIq9D9bluHQKBgQDxvpq8IrwScFuzZ5Kt
-MdIBC2yED1Fz7czOO40r4HrUAFhqwHz1aHzXGXBE834hqaRE8rp/n7Cfedf990Ku
-HRA81nQRIdhgv3vDHj4GaQBcfsU1DDtnz5GQ7AJka2zjK/jF10Bz3a3G0e9bp0xp
-XVdEB4ecldWgd56VD9NEaoML/wKBgQDa570m6ktm3cVZk8q4cfbAJb5iApGYaazl
-/tkrkLnihEjWHkNwcZ9zW9vj32OZajNxv0mEcWGS//dUomknbAjMje18R5FptTV8
-8HZhwdJNbgE47PqwfItjoT2TJJLOWbXJC74sbhwmr8JFJ3YlNr37XNIUI7+Df5DK
-o7FhHYu/uwKBgHP26SdCylFWEMc48gWfFoxlvIKFDp+7/TkZHBlmL8Wu/LsI52iZ
-3PkaPN72pxTi2egxQAGkywdVXaV4jUYUrwtgHIFzaObQNEBfK4XLrN6x+Uv/OCgO
-TFfmvycI2U0IdOgC3+o09v2fC2E+GcuZeRkrO4SD49x8RVhgJQp2xbJlAoGAMFDP
-1gQGinjOczwElXtJ7BUolTdd7Vb9u3Hpew0hihaKgQADAJGDkRGoUf5fXGAtZKDE
-2D+yOqDWdU63iOT2eDenQDQHq148pQvqBR+jjEWIbYkYt9V1apMPJSgAYx03210F
-J4dSHVPTvDG+iO4xNobBM4LEZYDFc4R3xSihTLsCgYEAgt0K8VoBbsbzWT5AbtUt
-Nj3K1SYNOekYo0tylmoxEMWU8XPVzzrALNUALet/0fW4RKG1g0/19ZaJdXCofVaX
-WalZagr49BT3QMBnGQJSOJGFX3Zje5QbmBRZgHVrkADXuNgDMac3sBTcthR39b4E
-+xCGpMK6P4VV1Bj10fyK3Mw=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDLtwmUh89RpKw7
+A3jAvtnu3kyOFZ3Jo8D1e5k2Pd60Qv59SWBAKkcml+bt8izR2+61VQu50P69DKcz
+EU4SpduuGLFRrZawQ1oro8i5PRopuGmajEShcxxKbclFaF0jQvsqulXuuRcauoAW
+XFGjiRxTksNH2J53Fu3nha7x0fv3oVJ8sUVDhn8XOKWzbYVKnBPDzv1UtolfV7Ql
+IWV6FnHDMTQjCpiuvjUSy684SFQhDcMlahnnsobeVp45WRdFyriYtJv2S1yUbYNx
+I18R4lmID3SoIbd0Qjrdq2qlddumc03LApOAY6nO383d3VuxkCrjLd85jcRV4u6K
+QgbA1z2vAgMBAAECggEBAMrEVXTR+Tl6fzP+MKMpVCK+gdSrD3M020t8yvOCgblL
+NaPe9T7o1glECUeuVY5NKzyyeglqTSQ/A+2AvAUzsYWIbnauId8wKwBk9dXrS69b
+L0H1fBp3SSGMRVoglxQSB45wCjijYqvoFrYrDdeJEfjdjCJnKtO+ru5T0d5A9Ft1
+HrXQsW3ztH3jcySdY4nB1i6ytINSArCY53oN1Z5/ehVkfcuTHkWPz6bciaZRY7Pl
+7YTuehovozl3n1eSccUFMGRHF2W69WZ94pHKW+/Qix5X02CTfCv/wWIBsgc5GcOc
+R9ArHNc1S1VhKk1DJwj8fhqyajBU44FG5/vX/wAg+QECgYEA+5n0fu2FWYcPUMab
+5H1sLbpFrRMCOa1H0rlyXfxLDenF773n8/TuCuAeNkO99ljo4y9K8bzEhd+gfZiq
+ubkFt9b0EUm0BuIKZhOpIO6rbsasL1ugLn0QxYb4kuNqim8n3nrX+0hngaFjyGLc
+lLI2ex6T3hEw0fOzgZBJeLFgk2cCgYEAz0bEJ9sqI8x5AoLjTaNIw6AjQ9z2J+Ar
+CZhu251bxrn3V4vPqFVq3K/2DqNvA0diV4lB4l9ijf0JSpNMsDd4wMxVoAki4EV+
+hNivn80EE3LTCEupq+ErYLx4npU6+NGcpFKqo0LG1lGaaePPyIUE4/tqk/PvnLZI
+4fIfQSwInnkCgYBPYmYYXk0C0HBuphut3jzxuKIfV64OELRmsoh5Sw9LVoVTfXHg
+MVmiKNCfgoQ/ZBInDFbzKwI+0y5KMo87hjtdo/7iLYUV3uA1EOL+Nw/0Jc9SKfDC
+ekd+a3WmswZ8o1HLCqt254NxDUD0iuzaJdi2xBEXsgjbVa8/pAzqfdzg5QKBgFI/
+PVSnVYhQ+W9yqmlDMntVjsi94/scq1cqYkrInQM1BZ16f9LG2hlpxRQ367P8Xlxn
+vXnq6Xt3/XjXDCYcTNEMA6n2Fh2x1as3JDEfs89Xz79J7rCiL4k6IA6lHnpfCm31
+03nm5GlkhgYgfHGPvsoMGFVPBTRE7JSjc51zsJPZAoGBAMoU0DbQJsdvOyReahd2
+0LSyT8mcLppUZp3lujXdIvQOdxIdXZThcz8OwEXrTP2SUmR5SAqmKFpCxryxwqex
+09vLGsJShewHlyb5BFDkYu0mgdvfpVSUzGcUUd/z2wH3K7cEVhgj+jCUoHA/AbYe
+Vxgkq989MMYc4gFUhOyzIVF/
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_title.pem.digest.sha1 b/jstests/libs/client_title.pem.digest.sha1
index 9314837da5e80..cadd1783de934 100644
--- a/jstests/libs/client_title.pem.digest.sha1
+++ b/jstests/libs/client_title.pem.digest.sha1
@@ -1 +1 @@
-689F782F2CE3483FDE3CFB70E9E22800C604FF14
\ No newline at end of file
+A8354AC4225473798EE2FA5BA6822731D1617F8E
\ No newline at end of file
diff --git a/jstests/libs/client_title.pem.digest.sha256 b/jstests/libs/client_title.pem.digest.sha256
index f465f2a44ed7a..407c95b086980 100644
--- a/jstests/libs/client_title.pem.digest.sha256
+++ b/jstests/libs/client_title.pem.digest.sha256
@@ -1 +1 @@
-183BD149346634BE397BACE2B8231CCDF8A435104CAC12C6E9C1814BFACFA8BB
\ No newline at end of file
+B9D245F1A0E281F0D2ACCFAD28246374F309A303550E1AA7730D423D97B4C865
\ No newline at end of file
diff --git a/jstests/libs/client_utf8.pem b/jstests/libs/client_utf8.pem
index eb34840b07639..05d890d2893d4 100644
--- a/jstests/libs/client_utf8.pem
+++ b/jstests/libs/client_utf8.pem
@@ -3,52 +3,52 @@
#
# Client certificate with non latin-1 unicode characters.
-----BEGIN CERTIFICATE-----
-MIIDqjCCApKgAwIBAgIEd+OXqjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDqjCCApKgAwIBAgIEMzr3GDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB4MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjB4MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxFTATBgNVBAsMDEtlcm5lbCBVc2VyczEVMBMG
A1UEAwwM0JrQsNC70L7Rj9C9MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
-AQEAvMqJXShCsc4p+P7CQ0KMOb4h/tkAovPsv/5tWNZGaa1aCtHrkH31vTCdz5mQ
-nRMm8tYCkn7ODHYbE7c6YMYshboPnVIdLSwGhGAmblF0N/IpGhrgHJcmnlLPVjW2
-5R/I1HLe7NtCZbO+rMG5zPJIyLqdSJiDLWX7vygsLx7PjgVFqymQKSrvtVi1v2Zx
-siyfzrT31hT7mEvWhW11JXYNZLZxLHgNSnZL9FDe/RXnAHbF2gYhmzoOxSDceefD
-vPylZXy9lqgT5puGvpE7nqXf1tcmoIsyPRTFf499E7g6Q3TcOTJQPWRytPvR8h0J
-pvUQk5b7EvrPFCNbQHfOeQgVjwIDAQABo0AwPjA8BgsrBgEEAYKOKQIBAQQtMSsw
+AQEA6t8zc4oEvXct4oub19eLtrGssh1yoydgd5CzR0PNL4yI+BVrx3y3J2y6NV9a
+XmJhWuBozl+dgEvWMbH/BRCVQVq3cTlvN8/RSm+4Nb/7GEqK7dwv5XKn893vzX8u
+MO+q3BlZnrSrhNA/PFvXJg+cAf3qCd1uZP6DQtEe7xTvWkRK3k6MGp1zRimD1Gb4
+NB78Nr+N+fNr8/1ke6yyV6rHPKLkTEX1miTIhZctAmsGc/UNU96nYdNTjhB+yWjZ
+8lLTwdAV4P9W6GBtqPvWtUmClbs5hJQ8hSl1Bc3ItdkfzupbE9cT0bi6KJsATyS0
+DSlcUq6ICER3E1YiI1sI5GSzXwIDAQABo0AwPjA8BgsrBgEEAYKOKQIBAQQtMSsw
DwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UMBWFkbWluMA0GCSqG
-SIb3DQEBCwUAA4IBAQBu+46q9zu+N0/XeVZftDwhizAg8eSvouryfixTXjXR5FuX
-GHt+FGlEs6ZNDUIsK/wYnpocJl+YhdACP8sWRMDKXOVwnyNWuMR1wBJ5bEUYrrDW
-HIXnJa/mtPq/oZd9bVA9W/aGfEpZN5tyfmCvlIN4n0fwVtnEeMoXGR8PWTs4s35g
-NJ+SzE8JY7JIKKR8+dPeVv7qv1OlbQF6yXVx8CvPv9FuPIcboFOpWIpliLU8ROVR
-N/Aq2Zf7MCXpK+8nZnA4O6mTNaeQZDlLpu017q5XGXqhgOqyFIKWOOJbWEVN14JZ
-nkYA6KgzV9IczoBimE7Sc002KoPkFjX0UlbmX/MS
+SIb3DQEBCwUAA4IBAQA7ItpDLulGN0Yo8vzs9eBn91WamTposa4lBosW2AHm+BU8
+QPWpNGLa1iOrKF/wdbPGAiJB5TlLZfv3qCUouFZUFWigP9ndrbsU3v426iuf1ufq
+thecoD72N5BGUl5iRmmqcbmB0XYB3xoxj+KI0bg5zxypEipH5p1yGTQkEz0KVsMX
+KL1oTHbIKOa+Rd3UIePRgA8ihNsSEMzK57z3Qzpu24QNC6S8baXNzKtQFjMH29Pb
+yckGKAmmSClk2JStUFxRCyQH2VqaxSS1evmqGyzv8AP50caW9IdAzP0MTgvowhr3
+d32nfa+VEN5fpLrTQyuVLIgBi3j/E16l1UqNYa+n
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC8yoldKEKxzin4
-/sJDQow5viH+2QCi8+y//m1Y1kZprVoK0euQffW9MJ3PmZCdEyby1gKSfs4MdhsT
-tzpgxiyFug+dUh0tLAaEYCZuUXQ38ikaGuAclyaeUs9WNbblH8jUct7s20Jls76s
-wbnM8kjIup1ImIMtZfu/KCwvHs+OBUWrKZApKu+1WLW/ZnGyLJ/OtPfWFPuYS9aF
-bXUldg1ktnEseA1Kdkv0UN79FecAdsXaBiGbOg7FINx558O8/KVlfL2WqBPmm4a+
-kTuepd/W1yagizI9FMV/j30TuDpDdNw5MlA9ZHK0+9HyHQmm9RCTlvsS+s8UI1tA
-d855CBWPAgMBAAECggEAVOHTZPknE36YTIVunoMbPRMJgeXbbWiyt5gTXe/pdLYm
-V9sOSMb5Z/il26M17AmiZ2hle6xbxCqN/g1RyBUSC4YeYyN6TscOnxID9XHWCRlt
-9XjD7tbe3CxrYF2CP/5Mc4i6RlBGyCdKwa3Qdh8xLl8JPIyl3hGOYOIVC5KUy0s5
-W4aQhgh9AlqI+wv7QugKrC3ZAdcRT/FwakcpmC+C5sUN5RxkldhGuYwaBaC1z0Vh
-8xk1EvvDf7oqoJRyUHbZW8hAXY3Ei/RrQnwTCU58Y24NBCJNxF7rV9ww1SwJ6HtC
-ApKBwM6iVtJjzi1GkgAXIS96RJB73f2YNLy45C/iYQKBgQDr546tqBL7G8lIEa2r
-nl+wsDjmpQPdeRxSgaSRgmHvFMVtuSKivVTD8w/xVjzi44D6fHf7hnt9sUPR72M9
-XKUTf3rjg45jQIfbwkRJQ1hpznLu5QMZWhD2VwUJAbCnzNw+yISpBOuU4W66FA1r
-TPYhZdb1k95pJnQlWa6XU0BlNwKBgQDM35BI9xFKmoCls8uFDH3F2Wkiafj0f2PH
-ikuyT/Wzdw4cyIITN7h9GCXgsQ70t9tALgoSlWODMtkBwn5iKCxA0RcIKEfOEZLQ
-i2W6nrVEh4cNePzUa211IHKbiWmHRMTm7K+hmMQua7k0kU1QaMbr+G90MsoISQjV
-g6F2Y17+aQKBgQDXSjIaJcN8y7T8QrX+Y8DsnUawp3RJkRfZ7FrmONlrucccBdOo
-NaXAVnj8RTm3zuyMrT9Km0bkRPyiARjOjVhR3Qunyw4NYn0af8aWHhH4LeMSRop3
-fozwZCZCO/qeiQWPfqwjHExrSPkmdNpyTIBrpmdxI4vc5q0k0R3XGLEyLQKBgQCs
-DK+B5yK5USjqfyRSNpxFFACrqu0sfvLPdv983pOLRFcwqt45v1iKjUX5/Rd/Qgu5
-STCiTJyGaKQ/SxOR4QTQ5gb+jb1HfBCcXARxhajaxSoQkWNaDGGetEIrBmn99C2b
-dHdGMSHHjiW51LBl76fmMPuATMrHzHXDwUhOQMNcyQKBgBwawHWxGAGm9Yzpjg86
-p+eK+KZX/INuLOtkFBNiaGhRbycvI05B0uWe1VGmhNaO5V5lrWniMuKuMQpNXIkZ
-73kMEssOlCf/z4Q7TB/LXoLdCtFjb06ZB55if9uTglewy+260TaQHxtKlpiazI3G
-JwDm0XRKm0TavwjfFinDJ0II
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDq3zNzigS9dy3i
+i5vX14u2sayyHXKjJ2B3kLNHQ80vjIj4FWvHfLcnbLo1X1peYmFa4GjOX52AS9Yx
+sf8FEJVBWrdxOW83z9FKb7g1v/sYSort3C/lcqfz3e/Nfy4w76rcGVmetKuE0D88
+W9cmD5wB/eoJ3W5k/oNC0R7vFO9aREreTowanXNGKYPUZvg0Hvw2v43582vz/WR7
+rLJXqsc8ouRMRfWaJMiFly0CawZz9Q1T3qdh01OOEH7JaNnyUtPB0BXg/1boYG2o
++9a1SYKVuzmElDyFKXUFzci12R/O6lsT1xPRuLoomwBPJLQNKVxSrogIRHcTViIj
+WwjkZLNfAgMBAAECggEASpCNvxKEKrpZYOC9myqc+mUx1GIw8MchgxP3Npq522yc
+V1ZFn1FivCtyeltJT0mgar9vxoTodcCdwa0mry7yk4r/TfzT6D4bzavmKXpHdxDD
+Rnxu4EMkOvGsnerQeUy9S57oqs/k3AdY7AqkJTszZhZ8ZKFd2MZMWbwWPATDcD4+
+XYiqadNS7uZfNpdwh8sWWGAynNkMvm8T/Hp+XgTCb2tQd8tdLfNI3EELIUGyF7nk
+ro09BSHxPVJ/kZlRAVq/1Qo3wTlP/GcQVFgxOByg8CVuA4nAoO77iI4jDTAXZ9To
+89Xe/tD1odlAVcKy/HMYkQ92Way1nGRGMthNW3JfkQKBgQD8EMwIr7IZviZLKc2h
+XaBwmGKU4UBF3ZfCljmb1vW0rfyNIRlCZbPgfyapYFpx7hl4XQXcAwbnhZS1hgCg
+HcaEEbDLn3meC/6EIfX52CCr+UdxN6yaFec3d8CubbS/VfMfRPlLcZlSVZHojJSR
+tPSWJE+vJrf5JOIPSwnKYNcdKwKBgQDuibOIoOnD9ZoRmegSaWLJGMITiZtVPkoK
+ldxHJaSOA8nNbCR+VH/7M2AbqMwacsMK22jbSYy7LrQUOA1EKFFV0WqnzIV8WkP6
+wrELGWC8RU9PnAX5dzZGa2giMHq+n/D/gjOOljZVwH6epnKYU8BEGY0xHMSVPx7r
+8zuP0p9wnQKBgQDj9DzQzjN+3Fu9Nbzk5csmiGj+wwZhKJkKPNk0eh4SnBX9e6Vg
+SqF1sQfBvYWN6wnVtCqMYaBo2IE3+Euwgbz5yxXb0AkoSSiPTjcpD6/cVr+pP/Q0
+FQLZQs9Z75S6RHxuFu5sr/s8Xm5ppFjnvIJFFemKOLW/9v8JXY8pGpLvqQKBgDdz
+lWPGnHEscAN1dsiI7Sj4c21dSNnZIBcz3UBA1O8anFf/ssAXXqUDvWzQoZnoYcTt
+WoPXpjqBUeV9XkAg6z967kNRvRo6VaE+jy+YV6+MoTJNf+oNN3XJVCzWfkJAJiMP
+nnKgIsNue0DgSrV0iraKBhOLr3tOcB7DrW3ytZPVAoGBANevYlW0zvAjg8YudaJE
+jKRCR7yTthG8pZLxS13Zs/H9HKCrk5q0Hw9SnJ9CifV1oBJ1TVeLti41JNFQbFOT
+K5xMVAE4cg2zhd7Af690B2UJEhi8IvUeKgDHuV7QOnuXNeRwqdHv8+9nJsuT5aUD
+IMzPSf4+7qRTVVPaUANsWDOy
-----END PRIVATE KEY-----
diff --git a/jstests/libs/client_utf8.pem.digest.sha1 b/jstests/libs/client_utf8.pem.digest.sha1
index 41b06c70d103c..a44054184ab2f 100644
--- a/jstests/libs/client_utf8.pem.digest.sha1
+++ b/jstests/libs/client_utf8.pem.digest.sha1
@@ -1 +1 @@
-A70A1D8408B98911E5C98B8986581ED8CB630EDA
\ No newline at end of file
+6C41B2B5610C5C7B81AFC363D139004837066824
\ No newline at end of file
diff --git a/jstests/libs/client_utf8.pem.digest.sha256 b/jstests/libs/client_utf8.pem.digest.sha256
index 1665d1eaecc4a..8d19e36d26696 100644
--- a/jstests/libs/client_utf8.pem.digest.sha256
+++ b/jstests/libs/client_utf8.pem.digest.sha256
@@ -1 +1 @@
-DD74B0EA2C4AE0AF8E0F57E2F095B4E7236FEA6959D82F5965956725C402BBC7
\ No newline at end of file
+F102A76AF3E51980D5F5365C923B9173E2438CB539ED558B7EE7B4624739BBD1
\ No newline at end of file
diff --git a/jstests/libs/cluster_cert.pem b/jstests/libs/cluster_cert.pem
index d6cef5a0733c0..add923b572e38 100644
--- a/jstests/libs/cluster_cert.pem
+++ b/jstests/libs/cluster_cert.pem
@@ -3,51 +3,51 @@
#
# Alternate cert for use in intra-cluster communication.
-----BEGIN CERTIFICATE-----
-MIIDYTCCAkmgAwIBAgIELTFGAzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDYTCCAkmgAwIBAgIEeEZt1zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBxMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjBxMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEUMBIGA1UEAwwL
-Y2x1c3RlcnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDheRo7
-28ofWqHTuHpzsunBG5MYcup2TsxxqbOeNSblGsHbKaP1sQqN+ZF2eVqYeRkDPWy8
-yA9iLbr8lz2dLSHdsZJUjsjggJjIKYHwPFV42Hu0yO6MYhElO0FHUBMPpw1k0ovL
-zW6VpaFiE45qJdpFEppCoqBk2QvEGkQ5mCl+BcQ2Y8oIlz40SHIrqmvbmXd8UXdW
-EV+9axIifb4oUWhpQ+ATJEGj0LL9c/PAtDw4c+fQjQ71HSB1s6EQNDlBqJIrgImZ
-pQOqsd5vMqQRpQ1ZQBAStmDtvHnqidrvsJMB5drCsF6ru4yAuQeCNqdOKipItPvJ
-3Ivf3C+nCSpkX1kVAgMBAAEwDQYJKoZIhvcNAQELBQADggEBADkg/GMf/49G8jCA
-6RphR02g0COWsI1nEaBxnvTlb1+sBVWwqYqZN9WhUxg5YTtQg9vCLC2pFG3E+EZJ
-hO/YdyOUuqR/V7QA1qucu+H3Sf8F95NCrc6dT75ibZsP1TtUgun8AWNRd+98Rrna
-ezIs+T7mE8FrvJoZX+OMOw/JpEll2sSL7Y7BtbV/EBqLCfI8NTUgJfXem+prIuHa
-/0ocxrS4cG+JArfasI3NtZWCLPXPTLBVxX/GL1xwVHE8G3GVXLxczNTaRzcKOG9P
-U82mA3oLMNmJl66FYs88uIeOLm9ub9rP6BDsRw9hh7hOJy/YYByRpMM9Yan8/k45
-c1qrwPs=
+Y2x1c3RlcnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBYgSW
+90TPI2bhEI/rgefJfV34FrHiqVGnUFrKPM1IId4Jkwp+uRexeHOf7GbPWlUiFB8k
+Cpt9YEa2cr52NiK1xxbBCQr58Efm1dJEtO2sR21kg4tyqaATe6uTWHjxPRaOMeMP
+Ponluixk+Fx2s2As48hBIlyrxpbbHphEThNzTa+8u5MevVsfozal1zvmDKg7Fgkn
+UCC72ujO1T8cx7sF6fS48GtygSnIC6x4N8pAF/SaMFXDOJK5qBGCYcMQ1JY53dKg
++zfW/GEiLG9hllv/1JKfEgKwsKyODoPXcS+gn9GGZs8wG/kq/8W87wlL0dw1MR7G
+U8N8JJFuNhUJ1X3FAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAIwMWBo9I3WWwF9c
+AqzlQ3o45vraQYvB8qnIoflLI1T+VDEgcUk44pwGY1/GcvirMYm31aB0GJhk+hiS
+9nxqLSEqZCNb/cNxA72z7OUTc+Sc+/IJBxY2vaEdUmqtXRb0NZWAVzO0lxOkpvqF
+yAEl+y3Pzlq1f6ogLdeBVobajkWVkGUmk/YboGd1dEYkmrInwj0dsjJBdoiVQJp6
+k2YLn1PIS8VcTCRL10x7iwMV311KQJPOTIxoF2g0XE96cCMWCUt+LA++Vn2G9rtv
+uz6P7Sj/hPmkEqQaYpd8MRTbCr/CS+v/r2NbsZU7bKb//N6UzypkUIwbrNh1bpt+
+YCQpMYs=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDheRo728ofWqHT
-uHpzsunBG5MYcup2TsxxqbOeNSblGsHbKaP1sQqN+ZF2eVqYeRkDPWy8yA9iLbr8
-lz2dLSHdsZJUjsjggJjIKYHwPFV42Hu0yO6MYhElO0FHUBMPpw1k0ovLzW6VpaFi
-E45qJdpFEppCoqBk2QvEGkQ5mCl+BcQ2Y8oIlz40SHIrqmvbmXd8UXdWEV+9axIi
-fb4oUWhpQ+ATJEGj0LL9c/PAtDw4c+fQjQ71HSB1s6EQNDlBqJIrgImZpQOqsd5v
-MqQRpQ1ZQBAStmDtvHnqidrvsJMB5drCsF6ru4yAuQeCNqdOKipItPvJ3Ivf3C+n
-CSpkX1kVAgMBAAECggEAJwjiQ86vVXJJZfAgMvucgHQXqTnr4YOO3Xa1xAQLscpK
-GXlCC30VCLNWPZ6Q0qjUbb0qoBw6nZKxNp2waw+vN0RQwxbdLBDXYn/dIQww9/Ty
-pb/LnL41na5+hKwWQLV5GiVhUm1EYeAE6ofcNgBLo32u+y5QemUhJlgty5LOTCS8
-NRdoSdr+TUPRO9qCkBNuxQfzU4Y4S56sp5eJsZ6rqME7Mjb03ZxBsZxDEeXBWoQH
-ABVFGoeEWOC5Xb1VAHNRy6Dcu5tT3sg0XgKLJr/OnWb+x7guyYcATAcVNA1dw6Mu
-SbCLIKBLhpg1/SlIYSo1umx6kguAbb5MyG3CSL2nwQKBgQD3CovK40JnnSCE79/r
-VVWiQGQPnFU2LOLQVPe4AppJdF+f1HGLpZ36gm5/x/5i2u8aD2zt+83y3CrZEPOe
-Zpyrk+zF8WBLuTfdgevcXk5iQtKj8EhSMoUJr75Nklmpe1IbJdhe5gH/cc1rqLLL
-1LM7qPy/B9qxm0CHrPYbi3WqTQKBgQDpplMRwR9nzNpvHRxxCHQk03HAdIWb6uXX
-nYFNSPNV18PljdiAjQq1o/8qRvC12FIXzcEztKiZIE01mQaxBHcGP8rPVZieoAW7
-p0+CIdShIfGAj3pSBVF65r1DvQn5+VtAm73xZVk/paWyBU3C9f/HfD72FCKO7K+i
-lyVc8v896QKBgA38HNnJb7Lp5DNiWOy7kfNzbbashO5iMBzXEobqLs3FtrwXjK35
-HC5YP+Uf7zitaCezg9kdDhWXfR14pjHrYScdYqP5/BeNTqpNdoqtFAbf/YimS5HL
-plhcCIvfow/DGWzMAamtn6NUT+quTCDZ06Om91fhG/I99bM4iV4Z0PlpAoGAYDhT
-q1MZyNhu+CVH6jCuC+BbzwLtZulUX/gIILizJR3nGajRiRcMWwM/eLygMnL0U8Mz
-FkUGzZCk2za6r7mD/rnUno/Ee0axNbdQIeoms3jUCLqNiCuSg4d0V+oIqr4K20+H
-6FpxZ6mi0+4lOO2vuQosr5BZLvy/07hk9aNIg8kCgYEAp0hCTHC6HTnF0/r2Gjev
-OttVaF015nKE0h25xi24JpUXrjddU40QPiIYgTHt80daYz01TPwz3CPnN7Wxh8yE
-z631hB9hBLr6ZY3cH7XcPPWx/ClAp9oyY+lceTRVhKJbWZcboEUO1k7HsD2Sgd+p
-C9lp6d7zepnwPkiFv0BBjyI=
+MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDBYgSW90TPI2bh
+EI/rgefJfV34FrHiqVGnUFrKPM1IId4Jkwp+uRexeHOf7GbPWlUiFB8kCpt9YEa2
+cr52NiK1xxbBCQr58Efm1dJEtO2sR21kg4tyqaATe6uTWHjxPRaOMeMPPonluixk
++Fx2s2As48hBIlyrxpbbHphEThNzTa+8u5MevVsfozal1zvmDKg7FgknUCC72ujO
+1T8cx7sF6fS48GtygSnIC6x4N8pAF/SaMFXDOJK5qBGCYcMQ1JY53dKg+zfW/GEi
+LG9hllv/1JKfEgKwsKyODoPXcS+gn9GGZs8wG/kq/8W87wlL0dw1MR7GU8N8JJFu
+NhUJ1X3FAgMBAAECggEBAJdijAVCfPNK23bE6GWMxE7PfYiA+7BcrDc2iFxZTLpz
+CMal/UlGzG15xcoj3jfkl1CeP+KPCzoS7EhXexI/PtzehcmHsrgGicMDECAJIyYC
+pHhTIg8ZCt81qRMn50WydL3L/9wnMPxNygMhCgFCWr/JcTLMG4+9XssaIEkYddGE
+Wki8ORGuE3N3KxjgQoFEP82AmhuotOc2jG2rEN3Ld41XlN1rIQ8cAxmIXJRViMj4
+q30zgNl/2KfD1Bg7C+SxaeaIWmL+9Qwq0cHw0GY6ddet3cvW5cDncZ+XKK7Uyg32
+ZGA1Te8TsJyZhu1nXUSKNMBaAZ79cExT1bYKsI2SpgECgYEA7PlUuhPSS1eg41Oq
+0GY425ww6hh+vQiB0M2iZGfIJteCzzRosKbJACYsyMTDPvtQkHP/vQG441NK4S+i
+0AqlFV0zYOP7jEKgIr40ed9HEpi/6ndivLb8Ioe/g9na2ZOrLnsr6swsZ1lbqxLm
+U/dwm3xJGUUWtu2nrZL8WRsXoDECgYEA0Oi7vxDe0iOHs6Ul76nJ0HG0bBSDJUSO
+SZpUg2ZqkyOrRvHePdahwdDCE9Hc6Aw03dUo0n5aB+bro95ETD6GS0qC64Og5wsK
+OH+o0/temGQ6Wlgz4ENUZtxjy93TDGLwUGD3rY9Rz8WI8RLzDxZ34WIWwbexS+Yr
+SR8LtvEARdUCgYEA5bpMLdu7WTjJCrf4dvEyG4vOS5KVgtH1byN6U1XczfLMp+yJ
+tP7rCo73iWZeVPczQeaCPIun3hDIHYedkYtQQGbKwRoiqPWJ4kR8AM24S8ny+uzj
+tki6IwtwWPTgWV7zaysTBxsJzOLun+jBixLsgn85Khs1Cv9XN0iwA/3kqHECgYEA
+gBiEroZwqldg6RV1qnvopGhkIfV96McdnCIGej+9T5WKe0jpZe+KZeZUaoS/OIXr
+kK4YhuE04S2GBYfPRxT7kYURu8mNSr4pOTWF1t2GRlkGssjsnjGKujue8a2FsE2m
+XxLmK8T6fDT6YB+na/Px9AAKRiQVkZ2DoyVnMHicGzUCgYEAgycmQO5q09G7aPZR
+7GyzerNlpuhqPiiH1cXIw0b1jTtTLgTm5pfsGw6Cip8MLBDbOiSyhl7wHJ7uAhUK
+zjR6lEGhV9Xr8tI+OsI7nXVuprZsBK5ujjGK0L9T6/YpJYYJ4826jmP/SvQuRdnm
+K0MRxiXWsoMefgwv5cD+jAZ3fas=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/cluster_cert.pem.digest.sha1 b/jstests/libs/cluster_cert.pem.digest.sha1
index 611b67d97c28c..0e859909e72e5 100644
--- a/jstests/libs/cluster_cert.pem.digest.sha1
+++ b/jstests/libs/cluster_cert.pem.digest.sha1
@@ -1 +1 @@
-DCC5666EE339AB3F62BEFBA3D0DB7D94121B951C
\ No newline at end of file
+4136812A5C5A442FC49C4018862F98FFE54648C5
\ No newline at end of file
diff --git a/jstests/libs/cluster_cert.pem.digest.sha256 b/jstests/libs/cluster_cert.pem.digest.sha256
index 84449a1793f2c..ee3203dfc2f42 100644
--- a/jstests/libs/cluster_cert.pem.digest.sha256
+++ b/jstests/libs/cluster_cert.pem.digest.sha256
@@ -1 +1 @@
-18CD5ACA178A2A7D8275976FA81341106BBC0F775F6DBF47035D846A50101CE6
\ No newline at end of file
+5A35ED3B01DA3990EBFE0BF2ACC3B1DA4DDB35222A0A2964FCE1345F85051058
\ No newline at end of file
diff --git a/jstests/libs/cluster_server_parameter_utils.js b/jstests/libs/cluster_server_parameter_utils.js
index 60f370b61ae22..dc4f5df8ac9c6 100644
--- a/jstests/libs/cluster_server_parameter_utils.js
+++ b/jstests/libs/cluster_server_parameter_utils.js
@@ -16,9 +16,9 @@
* when the featureFlag is disabled.
*/
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
-const kNonTestOnlyClusterParameters = {
+export const kNonTestOnlyClusterParameters = {
changeStreamOptions: {
default: {preAndPostImages: {expireAfterSeconds: 'off'}},
testValues: [
@@ -38,7 +38,7 @@ const kNonTestOnlyClusterParameters = {
},
};
-const kTestOnlyClusterParameters = {
+export const kTestOnlyClusterParameters = {
cwspTestNeedsFeatureFlagClusterWideToaster: {
default: {intData: 16},
testValues: [{intData: 17}, {intData: 18}],
@@ -58,21 +58,25 @@ const kTestOnlyClusterParameters = {
},
};
-const kAllClusterParameters =
+export const kAllClusterParameters =
Object.assign({}, kNonTestOnlyClusterParameters, kTestOnlyClusterParameters);
-const kAllClusterParameterNames = Object.keys(kAllClusterParameters);
-const kAllClusterParameterDefaults = kAllClusterParameterNames.map(
+
+export const kAllClusterParameterNames = Object.keys(kAllClusterParameters);
+
+export const kAllClusterParameterDefaults = kAllClusterParameterNames.map(
(name) => Object.assign({_id: name}, kAllClusterParameters[name].default));
-const kAllClusterParameterValues1 = kAllClusterParameterNames.map(
+
+export const kAllClusterParameterValues1 = kAllClusterParameterNames.map(
(name) => Object.assign({_id: name}, kAllClusterParameters[name].testValues[0]));
-const kAllClusterParameterValues2 = kAllClusterParameterNames.map(
+
+export const kAllClusterParameterValues2 = kAllClusterParameterNames.map(
(name) => Object.assign({_id: name}, kAllClusterParameters[name].testValues[1]));
-const kNonTestOnlyClusterParameterDefaults =
+export const kNonTestOnlyClusterParameterDefaults =
Object.keys(kNonTestOnlyClusterParameters)
.map((name) => Object.assign({_id: name}, kAllClusterParameters[name].default));
-function considerParameter(paramName, conn) {
+export function considerParameter(paramName, conn) {
// { featureFlag: 'name' } indicates that the CWSP should only be considered with the FF
// enabled. { featureFlag: '!name' } indicates that the CWSP should only be considered with the
// FF disabled.
@@ -88,7 +92,7 @@ function considerParameter(paramName, conn) {
// A dictionary of 'setParameters' that should be validated while considering the current CWSP.
function validateSetParameter(cp) {
if (cp.setParameters) {
- for ([param, value] of Object.entries(cp.setParameters)) {
+ for (let [param, value] of Object.entries(cp.setParameters)) {
const resp = conn.getDB("admin").runCommand({getParameter: 1, param: 1});
const hasParam = resp.hasOwnProperty(param) && resp[param] === value;
if (!hasParam) {
@@ -125,7 +129,7 @@ function considerParameter(paramName, conn) {
validateStandalone(cp);
}
-function tenantCommand(command, tenantId) {
+export function tenantCommand(command, tenantId) {
if (tenantId === undefined) {
return command;
} else {
@@ -134,12 +138,12 @@ function tenantCommand(command, tenantId) {
}
// Set the log level for get/setClusterParameter logging to appear.
-function setupNode(conn) {
+export function setupNode(conn) {
const adminDB = conn.getDB('admin');
adminDB.setLogLevel(2);
}
-function setupReplicaSet(rst) {
+export function setupReplicaSet(rst) {
setupNode(rst.getPrimary());
rst.getSecondaries().forEach(function(secondary) {
@@ -147,7 +151,7 @@ function setupReplicaSet(rst) {
});
}
-function setupSharded(st) {
+export function setupSharded(st) {
setupNode(st.s0);
const shards = [st.rs0, st.rs1, st.rs2];
@@ -160,7 +164,7 @@ function setupSharded(st) {
}
// Upserts config.clusterParameters document with w:majority via setClusterParameter.
-function runSetClusterParameter(conn, update, tenantId) {
+export function runSetClusterParameter(conn, update, tenantId) {
const paramName = update._id;
if (!considerParameter(paramName, conn)) {
return;
@@ -179,7 +183,7 @@ function runSetClusterParameter(conn, update, tenantId) {
// Runs getClusterParameter on a specific mongod or mongos node and returns true/false depending
// on whether the expected values were returned.
-function runGetClusterParameterNode(
+export function runGetClusterParameterNode(
conn, getClusterParameterArgs, expectedClusterParameters, tenantId) {
const adminDB = conn.getDB('admin');
@@ -229,7 +233,7 @@ function runGetClusterParameterNode(
// Runs getClusterParameter on each replica set node and asserts that the response matches the
// expected parameter objects on at least a majority of nodes.
-function runGetClusterParameterReplicaSet(
+export function runGetClusterParameterReplicaSet(
rst, getClusterParameterArgs, expectedClusterParameters, tenantId) {
let numMatches = 0;
const numTotalNodes = rst.getSecondaries().length + 1;
@@ -250,7 +254,7 @@ function runGetClusterParameterReplicaSet(
// Runs getClusterParameter on mongos, each mongod in each shard replica set, and each mongod in
// the config server replica set.
-function runGetClusterParameterSharded(
+export function runGetClusterParameterSharded(
st, getClusterParameterArgs, expectedClusterParameters, tenantId) {
assert(runGetClusterParameterNode(
st.s0, getClusterParameterArgs, expectedClusterParameters, tenantId));
@@ -265,7 +269,7 @@ function runGetClusterParameterSharded(
}
// Tests valid usages of set/getClusterParameter and verifies that the expected values are returned.
-function testValidClusterParameterCommands(conn) {
+export function testValidClusterParameterCommands(conn) {
if (conn instanceof ReplSetTest) {
// Run getClusterParameter in list format and '*' and ensure it returns all default values
// on all nodes in the replica set.
@@ -342,12 +346,12 @@ function testValidClusterParameterCommands(conn) {
}
}
-const tenantId1 = ObjectId();
-const tenantId2 = ObjectId();
+export const tenantId1 = ObjectId();
+export const tenantId2 = ObjectId();
// Tests valid usages of set/getClusterParameter on a serverless replica set and verifies that the
// expected values are returned.
-function testValidServerlessClusterParameterCommands(conn) {
+export function testValidServerlessClusterParameterCommands(conn) {
// TODO SERVER-69663 Add serverless sharded cluster tests once supported.
assert(conn instanceof ReplSetTest);
assert(
@@ -413,7 +417,7 @@ function testValidServerlessClusterParameterCommands(conn) {
}
// Assert that explicitly getting a disabled cluster server parameter fails on a node.
-function testExplicitDisabledGetClusterParameter(conn, tenantId) {
+export function testExplicitDisabledGetClusterParameter(conn, tenantId) {
const adminDB = conn.getDB('admin');
assert.commandFailedWithCode(adminDB.runCommand(tenantCommand(
{getClusterParameter: "testIntClusterParameter"}, tenantId)),
@@ -426,7 +430,7 @@ function testExplicitDisabledGetClusterParameter(conn, tenantId) {
// Tests that disabled cluster server parameters return errors or are filtered out as appropriate
// by get/setClusterParameter.
-function testDisabledClusterParameters(conn, tenantId) {
+export function testDisabledClusterParameters(conn, tenantId) {
if (conn instanceof ReplSetTest) {
// Assert that explicitly setting a disabled cluster server parameter fails.
const adminDB = conn.getPrimary().getDB('admin');
@@ -495,7 +499,7 @@ function testDisabledClusterParameters(conn, tenantId) {
}
// Tests that invalid uses of getClusterParameter fail on a given node.
-function testInvalidGetClusterParameter(conn, tenantId) {
+export function testInvalidGetClusterParameter(conn, tenantId) {
const adminDB = conn.getDB('admin');
// Assert that specifying a nonexistent parameter returns an error.
assert.commandFailedWithCode(
@@ -514,7 +518,7 @@ function testInvalidGetClusterParameter(conn, tenantId) {
}
// Tests that invalid uses of set/getClusterParameter fail with the appropriate errors.
-function testInvalidClusterParameterCommands(conn, tenantId) {
+export function testInvalidClusterParameterCommands(conn, tenantId) {
if (conn instanceof ReplSetTest) {
const adminDB = conn.getPrimary().getDB('admin');
@@ -539,6 +543,13 @@ function testInvalidClusterParameterCommands(conn, tenantId) {
// Assert that invalid uses of getClusterParameter fail on secondaries.
testInvalidGetClusterParameter(secondary, tenantId);
});
+
+ // Assert that invalid direct writes to config.clusterParameters fail.
+ assert.commandFailed(conn.getPrimary().getDB("config").clusterParameters.insert({
+ _id: 'testIntClusterParameter',
+ foo: 'bar',
+ clusterParameterTime: {"$timestamp": {t: 0, i: 0}}
+ }));
} else if (conn instanceof ShardingTest) {
const adminDB = conn.s0.getDB('admin');
@@ -594,6 +605,19 @@ function testInvalidClusterParameterCommands(conn, tenantId) {
// Assert that invalid forms of getClusterParameter fail on configsvr secondaries.
testInvalidGetClusterParameter(secondary, tenantId);
});
+ // Assert that invalid direct writes to config.clusterParameters fail.
+ assert.commandFailed(configRS.getPrimary().getDB("config").clusterParameters.insert({
+ _id: 'testIntClusterParameter',
+ foo: 'bar',
+ clusterParameterTime: {"$timestamp": {t: 0, i: 0}}
+ }));
+ shards.forEach(function(shard) {
+ assert.commandFailed(shard.getPrimary().getDB("config").clusterParameters.insert({
+ _id: 'testIntClusterParameter',
+ foo: 'bar',
+ clusterParameterTime: {"$timestamp": {t: 0, i: 0}}
+ }));
+ });
} else { // Standalone
const adminDB = conn.getDB('admin');
@@ -607,5 +631,12 @@ function testInvalidClusterParameterCommands(conn, tenantId) {
// Assert that running setClusterParameter with a scalar value fails.
assert.commandFailed(adminDB.runCommand(
tenantCommand({setClusterParameter: {testIntClusterParameter: 5}}, tenantId)));
+
+ // Assert that invalid direct writes to config.clusterParameters fail.
+ assert.commandFailed(conn.getDB("config").clusterParameters.insert({
+ _id: 'testIntClusterParameter',
+ foo: 'bar',
+ clusterParameterTime: {"$timestamp": {t: 0, i: 0}}
+ }));
}
}
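With this change, cluster_server_parameter_utils.js becomes an ES module: consuming tests import the helpers they need by name instead of calling load(). A minimal usage sketch, assuming a plain replica-set test (the node count is illustrative and not part of this patch; ReplSetTest is the shell-provided global in this tree):

    import {
        setupReplicaSet,
        testValidClusterParameterCommands,
    } from "jstests/libs/cluster_server_parameter_utils.js";

    const rst = new ReplSetTest({nodes: 3});
    rst.startSet();
    rst.initiate();
    setupReplicaSet(rst);                    // raises log verbosity on every node
    testValidClusterParameterCommands(rst);  // exercises set/getClusterParameter round-trips
    rst.stopSet();

Named exports keep the parameter tables (kAllClusterParameter*) and the runner functions individually importable, rather than relying on them leaking into the global scope via load().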
diff --git a/jstests/libs/cluster_title_foo.pem b/jstests/libs/cluster_title_foo.pem
index 51c5a1b0e118c..c304bb2e89955 100644
--- a/jstests/libs/cluster_title_foo.pem
+++ b/jstests/libs/cluster_title_foo.pem
@@ -3,52 +3,52 @@
#
# Alternate certificate for intracluster auth including the title attribute set to foo.
-----BEGIN CERTIFICATE-----
-MIIDjzCCAnegAwIBAgIEWd0RDTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDjzCCAnegAwIBAgIEfAUPhzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODIyWhcNMjUwNjIzMDIzODIyWjB/MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjB/MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEUMBIGA1UEAwwL
Y2x1c3RlcnRlc3QxDDAKBgNVBAwMA2ZvbzCCASIwDQYJKoZIhvcNAQEBBQADggEP
-ADCCAQoCggEBAKtYXLqpGhTggA2fItGZDqGwmPmUWpMpazBZ1vMxyvWeLQvso9Pk
-Ubz+zXT0MP+XtjteoqUwNcfRViSiv2wiIttBD3VlGH3dGJXSnQaMMjE1MORkkjHJ
-qeSZZA75QEpfyRhx7Tc+JEIwQx1Ptrrt1k9rQv58x1N8zN27Eqsqw3f9dq4XjpCs
-XRBcOOSjVyHRKli5j1wxFLDNxBtr5+i5LfmWOgPY/KSQtE0cRqFXTxajHuMaRUtl
-z9QMKRKc2uN3E7fA1Fa8IboT4mhG6mY9xO2rMf0cV4ZuMa3LimwG4KnTnii8cz8g
-fXPDENvdI4/Wm6YuUlQRlfu6v77Mb0UEfW8CAwEAAaMeMBwwGgYDVR0RBBMwEYIJ
-bG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCdZZaJZN0X1htNTL1I
-/ENBqZ5NYXyQi24yMJbRz+hVTFaR0gaecAG5A388YbcjmO1yLZcpzI4oHDSInc0Z
-1kS8Zsriqfkh4ZQsWeHV9LImclecpYK2l0VB6YOpTOS2f75+PEaRSEYiYWEwERrk
-q5IVodd59c5Mn8GUUrJVlVpNiwX1w0J9+qiUtmuQqrORpt6hbV1DGwXrMQgpprys
-tZiEOxRUEyGTTtMoxktsGbT0o6Z+YAQRl90UVB7rPCpzwuJECFi0JXH28cIfncnr
-8HVnEhxsPg4HHQmb5Ykq/gxNbAWSQAey3fP2NKosGnRKDJbd1ivyvvQNWya3DLIW
-dbnN
+ADCCAQoCggEBAMRFDgHwS8ElNPJ+vzGP13Z6gu/jXcKuoL88uCxIamXPDKJMuGPu
+pWy2YAZk3mYCFzEky4XFu8WMQsKUqFhvhQc2/YnV7bEQ0IBlRuAEoTJG3lP3TnhL
+dxvNpkol0A35wK9QHfzwAFT4yBFM7evZeC46j55wEVSAl5CYrioh1v5Og/O4qixt
+OhzEh7/kzB4pAfbwrL0NxqMSkwxWAJO2tlIMD0EgODRc/a6rFjZ6fhYwx1YehBRZ
+mYwF777XxiyYogwxqfjaIIbmK9g93rcJA5xmKOlhlhnI+S2JmNQDp+60S4C4ZMGO
+9Th03H/eGHEcC9Nn6ZXnS9OiARqcb8R2U8sCAwEAAaMeMBwwGgYDVR0RBBMwEYIJ
+bG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQABsEvdQJ4UdLxUcy7o
+gNoDYmddQrMwgkLoE0KzkWS471JcxAxY+9x+EA8PP5+7G3tR39UcDPus+dnLsm9Q
+Y7cq5/SEV3LuUXlBtxlEfnOflikOkFyvyYfEikEhyqj/cLMyOgUGVZzQW0DBwu5l
+lZvEatST8Ag3+M/kY124yY5Z8COnPvOtVTD0qYr7eFHtVPTmpqnZIvJzjBKQqmYd
+wv4suZNbEYBetewUJ5Ko8FUwlna5iuMsE92ME7WpGT3rRr6PHQ+jJfAt0WFMN6RF
+Bj0IHe7bq6IEBKo9iiV0EfX0zCSrWg94sQa22KSAGsa2IgNY4Xicr/YiZJMMNDBx
+NbPQ
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCrWFy6qRoU4IAN
-nyLRmQ6hsJj5lFqTKWswWdbzMcr1ni0L7KPT5FG8/s109DD/l7Y7XqKlMDXH0VYk
-or9sIiLbQQ91ZRh93RiV0p0GjDIxNTDkZJIxyankmWQO+UBKX8kYce03PiRCMEMd
-T7a67dZPa0L+fMdTfMzduxKrKsN3/XauF46QrF0QXDjko1ch0SpYuY9cMRSwzcQb
-a+fouS35ljoD2PykkLRNHEahV08Wox7jGkVLZc/UDCkSnNrjdxO3wNRWvCG6E+Jo
-RupmPcTtqzH9HFeGbjGty4psBuCp054ovHM/IH1zwxDb3SOP1pumLlJUEZX7ur++
-zG9FBH1vAgMBAAECggEAIQ63NTwS0BxQGFCvgwiojgFoQh6hKus+xuFOWzUsFx8h
-Sb+qC+Ns8a2nLf0+xtEaU3H6pywZ9CcrG35auB4N44c12Exc4Uuaxq0Ppoe910iP
-2kCdBAYIRRZi+5CTGsZIIfM49QOEM1DkYe9TLdVdF412K2sfebgGPnEtNODXPXrU
-P0iLqxXRJmiWMZoxzbxNATMS8LkUG4gjfDeuGJZD1QFoun6hbCT4W6B2CgVnSpM1
-8Njys39V55wjAsfaKm4gpzeNRj5V0iw/G++G43uCVUQntTR/kzMABsfYFIWmfuOq
-E50VteYwzlxskQZxqAPcw/7QmZNCANIfEEWFw3hmAQKBgQDgCiYlquODGUTcDydF
-jzmZ3nnpacvBQ9KVO8IwpOg0v63EGXokvhVBigUszYTnqdlnihTJxkccXeCr21G7
-pL1tqq8qKQga46MH1B4DF7xYftwwYewEzXYIfYfMkJaxPw3Q9xrWU0y2pbHUY7zg
-0odpV5IFkhyRpig3vdS15gqjQQKBgQDDydQx1XniYh72lX89Od5fXnfubzJJiG5J
-GzSP0Z4GMusEX6cV4VTjZydDunv22nmHUj2yVtXIyFST1VJ5A2/OmSwjN8Dm/91E
-/fTaSa3Eh/H5EzUV6EtuZXnIdYWBM/tQfQwViA2gph2mIMLUD7kxVo5G6y6wL+kw
-kgDqWRnkrwKBgQC0v1thXkoo9VT5mPwdAVz+R1/hsSniZR5aqZiUeCaij9XX9Jn3
-VKd/daORLsm/wOcVwm/dDatHNnHRFKMPGOx+soqZH/ta/jYEVdxUsGySlN595jJs
-+Xn1hZjur+PzYaR65zDuosusO2eJq2GxnAgFM9IpzmRgGUYvGmamzc3dQQKBgHrB
-2iTgx4oUoXtUIrI9zVqYfbPmzm3id9uojh06fc0/MbHNU5LZdIMcUzcY/s65Dwe0
-nfBql6JLURRb5VjwubKcwVrXg0CS3qZ6YIJZPfWCk0nrLBavTlRKlcAFR47KC+Hc
-da4uXvUCEobt9ZpGvYPc1FpM7ToU4C3O7XoCIcULAoGAD7W2C2tiHepHUlbLCiEt
-fHoyoWVc1v1xPRdw/lNHVkopHyxB7Zg8nf2ei9kv+6ECdqmNk6qiYVtFMd+gxK3e
-G5sgEZ2GazACraR9snz+iBOyYm+CoKJd1YzeyuFIs3hdq0++QQAm9XDaTu6C8HEM
-bkhlGRJcQyaN32bPtRXkymY=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDERQ4B8EvBJTTy
+fr8xj9d2eoLv413CrqC/PLgsSGplzwyiTLhj7qVstmAGZN5mAhcxJMuFxbvFjELC
+lKhYb4UHNv2J1e2xENCAZUbgBKEyRt5T9054S3cbzaZKJdAN+cCvUB388ABU+MgR
+TO3r2XguOo+ecBFUgJeQmK4qIdb+ToPzuKosbTocxIe/5MweKQH28Ky9DcajEpMM
+VgCTtrZSDA9BIDg0XP2uqxY2en4WMMdWHoQUWZmMBe++18YsmKIMMan42iCG5ivY
+Pd63CQOcZijpYZYZyPktiZjUA6futEuAuGTBjvU4dNx/3hhxHAvTZ+mV50vTogEa
+nG/EdlPLAgMBAAECggEAWhMLC665RYoK06OBoYBPNQuine9t71PvZ/S43XHaiqDM
+PowRL6OtfcfxJ94RByW8eQxW7yzBMTdeyCgrXnLHzXjaQPDSJin+Vn6kWVkmlYla
+rZRZCfIwVq05vNu/sTpGyO5u5M9Mh7KpNu9kXZIvip/Lm7345hwh8vVvMNzY61pU
+A3suFgpZ24XmQO27/mLK+H9uG28uGCg/3lIu+b70Z+lIQKfAN1Li05tWptFJV6P+
+oQZ9B6wtXKCpPsIUNZcMbet5zM3ypxm4XkchihTmB1IQoMW3VKDxf4+NRWmjivmy
+We6rEcZi1Xa3M65i2Z0wc4SnAy3txUDaHUj1SvXt+QKBgQD9OQmji4ks0Y/kxb72
+51GqncR/Z/0QKP/3qvseUw4V5D1K5WdHdKayNhVojXkV7SqiDxDBdMXe5wURXNj9
+n7nle5K9l5lS0W7eezmja5QQbLuLO1WpZ1eb+jbzbtW8IBtABZptJq7Hmq/sHKSI
+8RFwpX5G20XRhbfBBUYvovz0XwKBgQDGbBzNXq0eoft3hNnghDZgn5WuKc610Y8w
+esuf2uVdBOOk8WNtNjMbIU515mzHJilJiUwzNkEQsoT5/76Qg3IWWCgJi99PICeC
+86GtvQfH2jOrGGllmYtdlnSozwMQ7/pUD8DEM0l6JPPbb/ywvM36cUEwVgi0m0Mb
+bFc5cy64FQKBgGewd3X0qVMk7Nss7imERziqKdBR8Joxb78m0HV2ZQopz79feI8W
+ATxwUQvjAnYsC40Yxi+xdWT1DGozrtcMFL8XftsLvMjg4ZlQtCVq49Jl89XrkgQw
+QPup1d6QwAysyPvKT5XqhR9PBEKW5/j7XbzWx4KUP16wdrIfSsIu543ZAoGAOi3Z
+BB6OhDzajuDVQY+CojIooTiA867OXVij++si9XNJjEN687rAoWPSrZ8ypfH6iSVI
+wntV+J9ffi4OojDocsTGeIYapAi5jRwCe/7BGUhRfglaXf/3bSmAaz7Hl1/F9n/n
+9Z9UHAxZtC3R2cCCZLxwcMvJIauksZXCvYWYXUECgYBct0htlUnnI/9fU2VjTQIg
++1+16hgbERvLt6hdfiV3UQhlFjNK+sU800C4e6/wETJJwYHh50SzLzO/Jnd4XdT2
+CI6VfkOQTLd+lzGYHiAKDEAEbs5r5xNAPuRfVyYzJWn/1vpeEof0NkFNEDeB23h3
+JVHMGEFxjKDjiq6NPSQHWw==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/cluster_title_foo.pem.digest.sha1 b/jstests/libs/cluster_title_foo.pem.digest.sha1
index 2e6c630d0f444..05925e5d1890e 100644
--- a/jstests/libs/cluster_title_foo.pem.digest.sha1
+++ b/jstests/libs/cluster_title_foo.pem.digest.sha1
@@ -1 +1 @@
-AAA79606BF68AE2AFA2A0F37F4DCD09FFCFD8295
\ No newline at end of file
+E4F45DC0E7557AECD0C092C0E3825AC18B2F3EFE
\ No newline at end of file
diff --git a/jstests/libs/cluster_title_foo.pem.digest.sha256 b/jstests/libs/cluster_title_foo.pem.digest.sha256
index 21bcc294a5239..0fb8281c5f66d 100644
--- a/jstests/libs/cluster_title_foo.pem.digest.sha256
+++ b/jstests/libs/cluster_title_foo.pem.digest.sha256
@@ -1 +1 @@
-63EF60AFA384EAE126790C8CE5EE438F5956C77378D8997AD1644DBCC310F3DB
\ No newline at end of file
+0E1A9DBF21569470B698A2FEC88766FA73A6F9C37493E456C2E1E491EFAB0EBB
\ No newline at end of file
diff --git a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem
index b3dff850c96ee..a2ae24b730191 100644
--- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem
+++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem
@@ -3,51 +3,51 @@
#
# Alternate certificate for intracluster auth including the title attribute set to foo without O, OU, or DC.
-----BEGIN CERTIFICATE-----
-MIIDbDCCAlSgAwIBAgIER0TcWzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDbDCCAlSgAwIBAgIEMjcc+TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODUyWhcNMjUwNjIzMDIzODUyWjBcMRQwEgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBcMRQwEgYD
VQQDDAtjbHVzdGVydGVzdDEMMAoGA1UEDAwDZm9vMQswCQYDVQQGEwJVUzERMA8G
A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwpF7T1FoPnfAHotkAv5NRotQekIebAqHW
-ohdeQiqmJoIMJ58qZOaTaNm+HMRiPo6/PYuKqup7w9nkbBO6xRK6+N8nn3IsrOVl
-MOuERahyCcjEBRStZL/QiDSOK7FzBwLsnx/wSgNWisOzi840h0+OLmtpEK4kjxgg
-sbH5GFEObfWX4OgHtjdf4MTn/EZkyb643MQT6aD8/qQ0/Ai0ptKCbuCfycondK6U
-Tzps9vA61gXy/KwPhYfs9BVeWQAP7XHZvv7Lqgg5yciEC+qBwR3/pCYUhNqECndj
-VY0Uffp/uH5snBRNfS1+/p9jIo+t0nq3UXVjfz+Fl1Uwndp8wtaNAgMBAAGjHjAc
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDcjlSKJhnubIEYuoVuJ1A7Kn2xOVZvp3oB
+IAEScUY46vKoJOi4Hn+r3KXDejiGk8/s50DzObn9TjtNvJlrg4UCdiAcUZ6LCwDp
+2mfNy8ysYVRJi1sRGHGcfZChpadMDXCvuPj+V/m/l1RO0Ic/OPEG+yvZ8b1hCozN
+jhNePRlyaH6Tq7ZJWmmkyayeLkQ5PO6DY7CV2WEtpUs00IaUraMNnhIWOZFsBY7p
+hDUhFF7Y/PBjKozgi/WOwACsNvyGo9BZEq8YRj4zmSOY+z1lYtcw4SK7NyAooF5F
+szBY50arOCuz/q/oSb/yv4pFRQoMheV58tVgl7MIYmGDVxOna/afAgMBAAGjHjAc
MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
-E4q9YQX7PJ3IwibFNDpmwnb/mDHXQyhYdJsh7eRmdrF60TEMiXdYV+NMpVf/j1qV
-bXiV7TskcNkqIK+88wbgDeR0gen+MhAyHCSe5B7QwRsuQ+8elsN84urmu9fddSKw
-XycjivcqpqTGSyndWy5FAIfJ2SepZswgUofKcYOju36y6Ai5UBCQA1lNTwQHpQ8L
-nZbf/mcqtQ0Op9y+UaT8r+L/ju9rNTVw96fDq4oJNXHZQgFUKZrv73RsJJaj8v5X
-w0rYEQn0i3hIlap9clp4dXqFeqwrRxa5nI838p6DvjyMzBagMC6RVEHHI/JHAfzF
-yy9y0ma7HQ32Lg5XspPrGw==
+e+CY9ogOd42JptjeYltsTitqpuSRtp8J8/zes/yxeG0DEFhyqoWhlO3zDTM/p3zS
+uYYENre8dx5LBzTED/Bqz0/nV2Gn/vx3x3QWXDsG1yfNJEuVPexhTgqOKN2Mg88E
+vYyXIV+P7/8/pJQnp7tgUsG+FYLq7AkGrgoIX0e8KbJ5aXtTIvTKJ9VCAmsdsIZG
+Aw1x7AJUCmp16bvSX7CRkKVZGgFR1jTzDQENgaJChscprI+foohQ/JzHrXSxMO6k
+4H0BoGPPWXObYb2FjmAiK/a2wKJHk9E+TM5rz0Esmo96WPA/HMVupP8+8l/ypQu8
+TrSMtoHM7xBDZP/9Ng72MA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCwpF7T1FoPnfAH
-otkAv5NRotQekIebAqHWohdeQiqmJoIMJ58qZOaTaNm+HMRiPo6/PYuKqup7w9nk
-bBO6xRK6+N8nn3IsrOVlMOuERahyCcjEBRStZL/QiDSOK7FzBwLsnx/wSgNWisOz
-i840h0+OLmtpEK4kjxggsbH5GFEObfWX4OgHtjdf4MTn/EZkyb643MQT6aD8/qQ0
-/Ai0ptKCbuCfycondK6UTzps9vA61gXy/KwPhYfs9BVeWQAP7XHZvv7Lqgg5yciE
-C+qBwR3/pCYUhNqECndjVY0Uffp/uH5snBRNfS1+/p9jIo+t0nq3UXVjfz+Fl1Uw
-ndp8wtaNAgMBAAECggEABZWsydW04zmDFTq40aU86x/SxQScxPHYXAjT5E8DOi2N
-fwThq111TMPL3o7aRqDjsngnqUKuFyuh/+7K0OTaKr8jjwUjfvYYapKZX500LibR
-CiF+/dxplBY6UyRef9yA4ypEwDwWzu2kMlEBO/frM/uTucalOtKrWJ1FmzKBnYse
-8H9zLyKbc96xk6IiFYlBqe6O6JT6mZtBHwz59zVmuJ7eP0V8Se8ZTA1MEE3P+ORR
-/9xLURQc0hvfDFwSnM/gKAuwB3tpnJsEUmRCX0WBBCEiEJ+FaQ5yAihmfRv9AH8c
-dFR/7XuKEMN5jetR4khjB2eBY26SXRzTQ8qE9fujoQKBgQDonTDu6EAa0yoySjQ+
-q3KW2Ir4Egqw3kJfBQ3ZBjtsRvbBsl0S0rEfq2EfgusKvIz9sVDLEGhrOnn8zhKM
-CWkaikZORwniRtGUpMdsbw7UfUHaSDi/12kqD7vKXs3bJWrQsRVl3yHMYaDHWAUF
-L9q9rvD7AD12bFMH8cBnGjmuFwKBgQDCZp+0G8fUlVgACwmMNnToOT/mzQEjsUlG
-4ReS/o889pPvtpm+Ul5XK1Pl1gvcwfSo2hkzXBht95Sj5t7L4qBkK2naoN1LgbfX
-R/fLuMQLCYgUOs3UbOUfyOy1LfgEHINuDHVaK7RkiWhuHE3/a+VKZvOnffL8Copu
-xo1LyUHK+wKBgD8Rh5fu/pqHUHSMK/gl8g62LY+vDJkB2gr7StLh3rCv2O2Rl6yn
-1YBZrh6mF2Y00yFhtx8nlrgkBbkmgl7XmliozwEgP6zLOL3No4hh4Cp6v6UYWdKh
-7BCMbYUkCTp2vaxRpxSU2AwbGEWUNuA+JlexnALiAMgf/K81u834jVUHAoGBAIBP
-K+m8zFBLoiGlJ1AcQV1lLAAyHyZnxW2688xZqEEcntgBNcigpRPzzRROCtZSTiGE
-kk2L47PxTXJA15zKoAJ9hQiAVI+ZtrWpEqyr7vk5+U8g4OnsVe58t39+L8zG5Ril
-sG8rmY0iBINouzJzDIvnF7rdLpuceXJUKr5yv7IxAoGBALxfb5KmvbdmBe43a5zo
-J+Ig8oURUXbaWPb+8rpp+GaK3Hqf0Asjqlq2Fulz6TlwtkoRPl1yyebkNqz23p+T
-0K52WJWnpxmXi5dRqDTJie/E8Tvm8ff/Xey04jDdS+J56WAAnC0P5O+Lq9BD6iNG
-U3G/2LmJ+zn2NPeSxPyW3PSf
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDcjlSKJhnubIEY
+uoVuJ1A7Kn2xOVZvp3oBIAEScUY46vKoJOi4Hn+r3KXDejiGk8/s50DzObn9TjtN
+vJlrg4UCdiAcUZ6LCwDp2mfNy8ysYVRJi1sRGHGcfZChpadMDXCvuPj+V/m/l1RO
+0Ic/OPEG+yvZ8b1hCozNjhNePRlyaH6Tq7ZJWmmkyayeLkQ5PO6DY7CV2WEtpUs0
+0IaUraMNnhIWOZFsBY7phDUhFF7Y/PBjKozgi/WOwACsNvyGo9BZEq8YRj4zmSOY
++z1lYtcw4SK7NyAooF5FszBY50arOCuz/q/oSb/yv4pFRQoMheV58tVgl7MIYmGD
+VxOna/afAgMBAAECggEBAMw/VpTU8gC1JNxUpwo6h/cyw1Gi6qPdsYNnMvb0RXai
+RXNnMsiHHe53GF8tSTl/mvcltSVJWS72Cr9Tn7RMCJod8GCpSSw0VXU7GBQh3nno
+bFjrH7t2KogkVBMOSB6K98cTgipwKE3AA4g1Xnoy7ipr4dEkKB+82GXnY5JK/MzP
+tNoRD4YLWKbI60mgK1Po7DkeiAPub4dFwzpmdvfpHpB/vuuL7R+pieg9TeGAnrLg
+tjk8xsVTszxAlBlbw81y1IT3FpKhvmxbfNPDWLNRm19gxqPoPQF+QVQF8XxFQ9jC
+dmddjPpmZgkd/R3OPN7SLfN/fblXStTtHFyk0Ge3qLECgYEA/5ssCDQHzrf3ZTwc
+vQMZfxW8hI/0WFyTuT1+z/UJ5SA+WE1wQUu8yZD3brQjq3o0cSqdCY6XIC1EgH29
+yfp9bFvpB/mMytW1PPy552dcx9N9ZjtbiUEtvhllfdHzN4EHjvtCaEPWzuXUMRir
+meQ9jZQSqkbMamyUUyft6Rg39bcCgYEA3OVVBjLe9YmZ4wyhENFeAuR8bN/Kb/z1
++SxwWh14EwH4NCD1LaQIkKEQ/qqH2Lk5yB9R91eqambepTiIvYyoXeyWV51MqnnE
+mPzxBpi7wWblg6DX/vt6dZyDDupAnwsvhfLqbscrs1IfBe1IzE4y4wutIf2Kj4BI
+OS4dwUt0xlkCgYB7T+AJQRi3KNdodnyizxkAz3q4NT67VkZKKpnAN8YDTO/m580N
+Iz27vH0yYiOHOZiNM/K4xpqwAka2+nKSO49AtIKSv1imDj22Y3JIafw/xw8LP/2k
+FNa9jEEDV0NP9qav2xWpeXjrLcOVwAIhZxQu9k2e6jL58NIJ4AyN1IpCtwKBgEhn
+HuRGPlBKxtbl59EkXuUh0Sq6e5cTWehPSZAwApBwX9NuTg6kqm4FbRWb/aTqqWbP
+5UTmRiRUDXwoOKGwfeszRd/33g4ulWLz3WilHT16JZZsXP/lm7D7GPYFkCLRPsVv
+5qlivQ8sxqBhsy/MHd/PjvMKMKVyjbm4ROZ7fg4ZAoGBAP1oDVm7TkG9c5WQjhGi
+mNLOICxHJgwbKvBohyNNSks/ghla8CzVaZrwuNNUhU6mD2vE254+GaFaIfT0pFVJ
+cGO88GVJENIoQ28E4nvcmlyAAb5tep0TqWvAoYkMH6VZ2pjMOtpqt3HuvZKg1Znf
+PEP+srnIlrQ1OgN7lpisW0e8
-----END PRIVATE KEY-----
diff --git a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1
index 2550111023592..2b7e1e0bb1544 100644
--- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1
+++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1
@@ -1 +1 @@
-ECA9EA58F05E2C92503D0F0B776BA5264A7D9D4B
\ No newline at end of file
+49FC3CB680E39E396B89E69712F90AE3881D5A12
\ No newline at end of file
diff --git a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256
index 87c6d2980e612..ad839f370fced 100644
--- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256
+++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256
@@ -1 +1 @@
-D2AAD57CB4C330806DA153860BD0E908E3CFE4C41061986C3F27DBC1DD80B2D2
\ No newline at end of file
+20B576CDF1772A6CF29AE384ED6E0E2CF60D98A2B0DFE4F13B81054E118F242A
\ No newline at end of file
diff --git a/jstests/libs/clustered_collections/clustered_capped_utils.js b/jstests/libs/clustered_collections/clustered_capped_utils.js
index 5045fa3d07e40..0ede4d5464408 100644
--- a/jstests/libs/clustered_collections/clustered_capped_utils.js
+++ b/jstests/libs/clustered_collections/clustered_capped_utils.js
@@ -1,6 +1,6 @@
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
-var ClusteredCappedUtils = class {
+export var ClusteredCappedUtils = class {
// Validate TTL-based deletion on a clustered, capped collection.
static testClusteredCappedCollectionWithTTL(db, collName, clusterKeyField) {
jsTest.log("Validating TTL operation on capped clustered collection");
@@ -308,12 +308,7 @@ var ClusteredCappedUtils = class {
{getParameter: 1, "ttlMonitorBatchDeletes": 1}))["ttlMonitorBatchDeletes"];
const ns = db.getName() + "." + collName;
- const featureFlagBatchMultiDeletes = assert.commandWorked(db.adminCommand({
- getParameter: 1,
- "featureFlagBatchMultiDeletes": 1
- }))["featureFlagBatchMultiDeletes"]["value"];
-
- if (featureFlagBatchMultiDeletes && isBatched) {
+ if (isBatched) {
const ops =
db.getSiblingDB("local")
.oplog.rs
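The hunks above convert this library to an ES module: the load() call becomes an import, and ClusteredCappedUtils is now exported. A consuming test would then pull the class in with an import rather than a load(); a minimal sketch, where the collection name and cluster key field are placeholders:

import {ClusteredCappedUtils} from "jstests/libs/clustered_collections/clustered_capped_utils.js";

// Placeholder names; any clustered, capped test collection works here.
ClusteredCappedUtils.testClusteredCappedCollectionWithTTL(db, "capped_clustered_ttl", "ttl");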
diff --git a/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js b/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js
index f9495c913c5c8..fbfde4f31370d 100644
--- a/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js
+++ b/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js
@@ -1,10 +1,10 @@
/**
* Validate bounded collection scans on a clustered collection.
*/
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
-const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
+export const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
"use strict";
- load("jstests/libs/analyze_plan.js");
load("jstests/libs/collection_drop_recreate.js");
const batchSize = 100;
@@ -23,6 +23,27 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.commandWorked(bulk.execute());
assert.eq(coll.find().itcount(), batchSize);
}
+
+ // Checks that the number of docs examined matches the expected number. There are separate
+ // expected args for Classic vs SBE because in Classic there is an extra cursor->next() call
+ // beyond the end of the range if EOF has not been hit, but in SBE there is not. This function
+ // also handles that this stat is in different places for the two engines:
+ // Classic: executionStats.executionStages.docsExamined
+ // SBE: executionStats.totalDocsExamined
+ function assertDocsExamined(executionStats, expectedClassic, expectedSbe) {
+ let sbe = false;
+ let docsExamined = executionStats.executionStages.docsExamined;
+ if (docsExamined == undefined) {
+ sbe = true;
+ docsExamined = executionStats.totalDocsExamined;
+ }
+ if (sbe) {
+ assert.eq(expectedSbe, docsExamined);
+ } else {
+ assert.eq(expectedClassic, docsExamined);
+ }
+ }
+
function testEq() {
initAndPopulate(coll, clusterKey);
@@ -36,11 +57,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.eq(5, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord);
assert.eq(1, expl.executionStats.executionStages.nReturned);
- // Expect nReturned + 1 documents examined by design - additional cursor 'next' beyond
- // the range.
- assert.eq(2, expl.executionStats.executionStages.docsExamined);
+ // In Classic, expect nReturned + 1 documents examined by design - additional cursor 'next'
+ // beyond the range. In SBE, expect nReturned as it does not examine the extra document.
+ assertDocsExamined(expl.executionStats, 2, 1);
}
- function testLT(op, val, expectedNReturned, expectedDocsExamined) {
+
+ function testLT(op, val, expectedNReturned, expectedDocsExaminedClassic) {
initAndPopulate(coll, clusterKey);
const expl = assert.commandWorked(coll.getDB().runCommand({
@@ -55,9 +77,14 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.eq(NaN, getPlanStage(expl, "CLUSTERED_IXSCAN").minRecord);
assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned);
- assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined);
+
+ // In this case the scans do not hit EOF, so there is an extra cursor->next() call past the
+ // end of the range in Classic, making SBE expect one fewer doc examined than Classic.
+ assertDocsExamined(
+ expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic - 1);
}
- function testGT(op, val, expectedNReturned, expectedDocsExamined) {
+
+ function testGT(op, val, expectedNReturned, expectedDocsExaminedClassic) {
initAndPopulate(coll, clusterKey);
const expl = assert.commandWorked(coll.getDB().runCommand({
@@ -72,9 +99,14 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.eq(89, getPlanStage(expl, "CLUSTERED_IXSCAN").minRecord);
assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned);
- assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined);
+
+ // In this case the scans hit EOF, so there is no extra cursor->next() call in Classic,
+ // making Classic and SBE expect the same number of docs examined.
+ assertDocsExamined(
+ expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic);
}
- function testRange(min, minVal, max, maxVal, expectedNReturned, expectedDocsExamined) {
+
+ function testRange(min, minVal, max, maxVal, expectedNReturned, expectedDocsExaminedClassic) {
initAndPopulate(coll, clusterKey);
const expl = assert.commandWorked(coll.getDB().runCommand({
@@ -92,8 +124,13 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.eq(maxVal, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord);
assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned);
- assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined);
+
+ // In this case the scans do not hit EOF, so there is an extra cursor->next() call past the
+ // end of the range in Classic, making SBE expect one fewer doc examined than Classic.
+ assertDocsExamined(
+ expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic - 1);
}
+
function testIn() {
initAndPopulate(coll, clusterKey);
@@ -107,10 +144,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
assert.eq(30, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord);
assert.eq(3, expl.executionStats.executionStages.nReturned);
- // The range scanned is 21 documents + 1 extra document by design - additional cursor
- // 'next' beyond the range.
- assert.eq(22, expl.executionStats.executionStages.docsExamined);
+ // The range scanned is 21 documents. In Classic, expect 'docsExamined' to be one higher by
+ // design - additional cursor 'next' beyond the range. In SBE, expect 21 as it does not
+ // examine the extra document.
+ assertDocsExamined(expl.executionStats, 22, 21);
}
+
function testNonClusterKeyScan() {
initAndPopulate(coll, clusterKey);
@@ -127,6 +166,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
function testBoundedScans(coll, clusterKey) {
testEq();
+
+ // The last argument of the following calls, 'expectedDocsExaminedClassic', and the specific
+ // comments, are for Classic engine. SBE does not have the additional cursor->next() call
+ // beyond the range, so in calls to testLT() and testRange() its value will be one lower.
+ // This is accounted for by delegations to the assertDocsExamined() helper function.
+
// Expect docsExamined == nReturned + 2 due to the collection scan bounds being always
// inclusive and due to the by-design additional cursor 'next' beyond the range.
testLT("$lt", 10, 10, 12);
@@ -139,7 +184,7 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) {
testGT("$gt", 89, 10, 11);
// Expect docsExamined == nReturned.
testGT("$gte", 89, 11, 11);
- // docsExamined reflects the fact that collection scan bounds are always exclusive and
+ // docsExamined reflects the fact that collection scan bounds are always inclusive and
// that by design we do an additional cursor 'next' beyond the range.
testRange("$gt", 20, "$lt", 40, 19, 22);
testRange("$gte", 20, "$lt", 40, 20, 22);
diff --git a/jstests/libs/clustered_collections/clustered_collection_hint_common.js b/jstests/libs/clustered_collections/clustered_collection_hint_common.js
index a65939698e2df..6315d5967f64b 100644
--- a/jstests/libs/clustered_collections/clustered_collection_hint_common.js
+++ b/jstests/libs/clustered_collections/clustered_collection_hint_common.js
@@ -1,12 +1,10 @@
/**
* Validate $hint on a clustered collection.
*/
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/collection_drop_recreate.js");
-function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) {
- "use strict";
- load("jstests/libs/analyze_plan.js");
- load("jstests/libs/collection_drop_recreate.js");
-
+export function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) {
const clusterKeyFieldName = Object.keys(clusterKey)[0];
const batchSize = 100;
@@ -196,6 +194,91 @@ function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) {
}
});
+ // Find with $natural hints and sorts: we should scan the collection in the hinted
+ // direction regardless of sort direction, and provide a blocking sort if needed.
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: 1}, sort: {[clusterKeyFieldName]: 1}},
+ expectedWinningPlanStats: {
+ stage: "COLLSCAN",
+ direction: "forward",
+ },
+ unexpectedWinningPlanStats: ["SORT"] // We shouldn't need a blocking sort here.
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: -1}, sort: {[clusterKeyFieldName]: 1}},
+ expectedWinningPlanStats: [
+ {stage: "SORT", sortPattern: {[clusterKeyFieldName]: 1}},
+ {
+ stage: "COLLSCAN",
+ direction: "backward",
+ }
+ ]
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: 1}, sort: {[clusterKeyFieldName]: -1}},
+ expectedWinningPlanStats: [
+ {stage: "SORT", sortPattern: {[clusterKeyFieldName]: -1}},
+ {
+ stage: "COLLSCAN",
+ direction: "forward",
+ }
+ ]
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: -1}, sort: {[clusterKeyFieldName]: -1}},
+ expectedWinningPlanStats: {
+ stage: "COLLSCAN",
+ direction: "backward",
+ },
+ unexpectedWinningPlanStats: ["SORT"] // We shouldn't need a blocking sort here.
+ });
+
+ // We always need a blocking sort when the sort pattern does not match the provided sort for
+ // the clustered collection.
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: 1}, sort: {a: 1}},
+ expectedWinningPlanStats: [
+ {stage: "SORT", sortPattern: {a: 1}},
+ {
+ stage: "COLLSCAN",
+ direction: "forward",
+ }
+ ]
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: -1}, sort: {a: 1}},
+ expectedWinningPlanStats: [
+ {stage: "SORT", sortPattern: {a: 1}},
+ {
+ stage: "COLLSCAN",
+ direction: "backward",
+ }
+ ]
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: 1}, sort: {a: -1}},
+ expectedWinningPlanStats: [
+ {stage: "SORT", sortPattern: {a: -1}},
+ {
+ stage: "COLLSCAN",
+ direction: "forward",
+ }
+ ]
+ });
+ validateClusteredCollectionHint(coll, {
+ expectedNReturned: batchSize,
+ cmd: {find: collName, hint: {$natural: -1}, sort: {a: -1}},
+ expectedWinningPlanStats:
+ [{stage: "SORT", sortPattern: {a: -1}}, {stage: "COLLSCAN", direction: "backward"}],
+ });
+
// Find on a standard index.
validateClusteredCollectionHint(coll, {
expectedNReturned: batchSize,
@@ -280,25 +363,39 @@ function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) {
return testHint(coll, clusterKey, clusterKeyName);
}
-function validateClusteredCollectionHint(coll,
- {expectedNReturned, cmd, expectedWinningPlanStats = {}}) {
+export function validateClusteredCollectionHint(
+ coll,
+ {expectedNReturned, cmd, expectedWinningPlanStats = {}, unexpectedWinningPlanStats = []}) {
const explain = assert.commandWorked(coll.runCommand({explain: cmd}));
assert.eq(explain.executionStats.nReturned, expectedNReturned, tojson(explain));
const actualWinningPlan = getWinningPlan(explain.queryPlanner);
- const stageOfInterest = getPlanStage(actualWinningPlan, expectedWinningPlanStats.stage);
- assert.neq(null, stageOfInterest);
- for (const [key, value] of Object.entries(expectedWinningPlanStats)) {
- assert(stageOfInterest[key] !== undefined, tojson(explain));
- assert.eq(stageOfInterest[key], value, tojson(explain));
+ if (!Array.isArray(expectedWinningPlanStats)) {
+ expectedWinningPlanStats = [expectedWinningPlanStats];
}
- // Explicitly check that the plan is not bounded by default.
- if (!expectedWinningPlanStats.hasOwnProperty("minRecord")) {
- assert(!actualWinningPlan.hasOwnProperty("minRecord"), tojson(explain));
+ for (const excludedStage of unexpectedWinningPlanStats) {
+ const stageOfInterest = getPlanStage(actualWinningPlan, excludedStage);
+ assert.eq(null, stageOfInterest);
}
- if (!expectedWinningPlanStats.hasOwnProperty("maxRecord")) {
- assert(!actualWinningPlan.hasOwnProperty("maxRecord"), tojson(explain));
+
+ for (const expectedWinningPlanStageStats of expectedWinningPlanStats) {
+ const stageOfInterest =
+ getPlanStage(actualWinningPlan, expectedWinningPlanStageStats.stage);
+ assert.neq(null, stageOfInterest);
+
+ for (const [key, value] of Object.entries(expectedWinningPlanStageStats)) {
+ assert(stageOfInterest[key] !== undefined, tojson(explain));
+ assert.eq(stageOfInterest[key], value, tojson(explain));
+ }
+
+ // Explicitly check that the plan is not bounded by default.
+ if (!expectedWinningPlanStageStats.hasOwnProperty("minRecord")) {
+ assert(!actualWinningPlan.hasOwnProperty("minRecord"), tojson(explain));
+ }
+ if (!expectedWinningPlanStageStats.hasOwnProperty("maxRecord")) {
+ assert(!actualWinningPlan.hasOwnProperty("maxRecord"), tojson(explain));
+ }
}
}
diff --git a/jstests/libs/clustered_collections/clustered_collection_util.js b/jstests/libs/clustered_collections/clustered_collection_util.js
index 0018721551b45..e53220858dbbf 100644
--- a/jstests/libs/clustered_collections/clustered_collection_util.js
+++ b/jstests/libs/clustered_collections/clustered_collection_util.js
@@ -2,7 +2,6 @@
* Utilities for testing clustered collections.
*/
-load("jstests/libs/analyze_plan.js");
load("jstests/libs/collection_drop_recreate.js");
var ClusteredCollectionUtil = class {
@@ -193,4 +192,4 @@ var ClusteredCollectionUtil = class {
assert.eq(1, coll.find({[clusterKey]: NumberLong("42")}).itcount());
coll.drop();
}
-};
+};
\ No newline at end of file
diff --git a/jstests/libs/columnstore_util.js b/jstests/libs/columnstore_util.js
index 9d80d3ae87d11..58b6831ef18f5 100644
--- a/jstests/libs/columnstore_util.js
+++ b/jstests/libs/columnstore_util.js
@@ -6,7 +6,7 @@ load("jstests/libs/discover_topology.js"); // For findNonConfigNodes.
// For areAllCollectionsClustered.
load("jstests/libs/clustered_collections/clustered_collection_util.js");
load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
/**
* Updates server parameters to disable column scan query planning heuristics so that column scan
@@ -17,7 +17,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBE
* for the planning heuristics behavior is included in unit tests, no passthrough tests, and perf
* tests.
*/
-function fullyEnableColumnScan(nodes) {
+export function fullyEnableColumnScan(nodes) {
// Since the CSI query planning heuristics are OR-ed together, we can set any one of
// [internalQueryColumnScanMinAvgDocSizeBytes, internalQueryColumnScanMinCollectionSizeBytes,
// internalQueryColumnScanMinNumColumnFilters] to zero in order to fully enable column scan.
@@ -29,12 +29,12 @@ function fullyEnableColumnScan(nodes) {
* expected to succeed. Otherwise, logs the reason why the test will not create column store indexes
* and returns false.
*/
-function safeToCreateColumnStoreIndex(db) {
+export function safeToCreateColumnStoreIndex(db) {
return safeToCreateColumnStoreIndexInCluster(
DiscoverTopology.findNonConfigNodes(db.getMongo()));
}
-function safeToCreateColumnStoreIndexInCluster(nodes) {
+export function safeToCreateColumnStoreIndexInCluster(nodes) {
for (const node of nodes) {
const conn = new Mongo(node);
if (FixtureHelpers.isMongos(conn.getDB("admin"))) {
@@ -79,7 +79,7 @@ function safeToCreateColumnStoreIndexInCluster(nodes) {
* Checks if the test is eligible to run and sets the appropriate parameters to use column store
* indexes. Returns true if setup was successful.
*/
-function setUpServerForColumnStoreIndexTest(db) {
+export function setUpServerForColumnStoreIndexTest(db) {
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping column store index test since SBE is disabled");
return false;
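Per the comment in fullyEnableColumnScan() above, the column scan planning heuristics are OR-ed together, so zeroing any one of the listed parameters fully enables column scan. A sketch of what that amounts to for a single parameter (helper and parameter names are taken from this file; the exact call shape is an assumption):

setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()),
                       "internalQueryColumnScanMinCollectionSizeBytes",
                       0);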
diff --git a/jstests/libs/config_files/disable_moveparanoia.ini b/jstests/libs/config_files/disable_moveparanoia.ini
deleted file mode 100644
index f21b50f9513c9..0000000000000
--- a/jstests/libs/config_files/disable_moveparanoia.ini
+++ /dev/null
@@ -1 +0,0 @@
-moveParanoia=false
diff --git a/jstests/libs/config_files/disable_nomoveparanoia.ini b/jstests/libs/config_files/disable_nomoveparanoia.ini
deleted file mode 100644
index 4696304134f36..0000000000000
--- a/jstests/libs/config_files/disable_nomoveparanoia.ini
+++ /dev/null
@@ -1 +0,0 @@
-noMoveParanoia=false
diff --git a/jstests/libs/config_files/enable_paranoia.json b/jstests/libs/config_files/enable_paranoia.json
deleted file mode 100644
index 218646b1662e0..0000000000000
--- a/jstests/libs/config_files/enable_paranoia.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "sharding" : {
- "archiveMovedChunks" : true
- }
-}
diff --git a/jstests/libs/config_files/set_shardingrole_configsvr.json b/jstests/libs/config_files/set_shardingrole_configsvr.json
index f09d10d3f18a2..be6c3853e0bfe 100644
--- a/jstests/libs/config_files/set_shardingrole_configsvr.json
+++ b/jstests/libs/config_files/set_shardingrole_configsvr.json
@@ -5,4 +5,4 @@
"replication" : {
"replSetName" : "dummy"
}
-}
\ No newline at end of file
+}
diff --git a/jstests/libs/config_files/set_shardingrole_shardsvr.json b/jstests/libs/config_files/set_shardingrole_shardsvr.json
index c605dce50cc9e..333a33528b9ad 100644
--- a/jstests/libs/config_files/set_shardingrole_shardsvr.json
+++ b/jstests/libs/config_files/set_shardingrole_shardsvr.json
@@ -5,4 +5,4 @@
"replication" : {
"replSetName" : "dummy"
}
-}
\ No newline at end of file
+}
diff --git a/jstests/libs/config_shard_util.js b/jstests/libs/config_shard_util.js
new file mode 100644
index 0000000000000..a0c8f1584a405
--- /dev/null
+++ b/jstests/libs/config_shard_util.js
@@ -0,0 +1,53 @@
+/**
+ * Utilities for testing config server config shard behaviors.
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+export const ConfigShardUtil = (function() {
+ function isTransitionEnabledIgnoringFCV(st) {
+ return FeatureFlagUtil.isEnabled(st.configRS.getPrimary(),
+ "TransitionToCatalogShard",
+ undefined /* user */,
+ true /* ignoreFCV */);
+ }
+
+ function transitionToDedicatedConfigServer(st, timeout) {
+ if (timeout == undefined) {
+ timeout = 10 * 60 * 1000; // 10 minutes
+ }
+
+ assert.soon(function() {
+ const res = st.s.adminCommand({transitionToDedicatedConfigServer: 1});
+ if (!res.ok && res.code === ErrorCodes.ShardNotFound) {
+ // If the config server primary steps down right after removing the config.shards
+ // doc for the shard but before responding with "state": "completed", the mongos
+ // would retry the _configsvrTransitionToDedicatedConfigServer command against the
+ // new config server primary, which would not find the removed shard in its
+ // ShardRegistry if it has done a ShardRegistry reload after the config.shards doc
+ // for the shard was removed. This would cause the command to fail with
+ // ShardNotFound.
+ return true;
+ }
+ assert.commandWorked(res);
+ return res.state == 'completed';
+ }, "failed to transition to dedicated config server within " + timeout + "ms", timeout);
+ }
+
+ function waitForRangeDeletions(conn) {
+ assert.soon(() => {
+ const rangeDeletions = conn.getCollection("config.rangeDeletions").find().toArray();
+ if (rangeDeletions.length) {
+ print("Waiting for range deletions to complete: " + tojsononeline(rangeDeletions));
+ sleep(100);
+ return false;
+ }
+ return true;
+ });
+ }
+
+ return {
+ isTransitionEnabledIgnoringFCV,
+ transitionToDedicatedConfigServer,
+ waitForRangeDeletions,
+ };
+})();
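A sketch of how a sharding test might use the new ConfigShardUtil module; the ShardingTest fixture 'st' and the choice of connection for the range-deletion wait are assumptions, not part of the module:

import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";

if (ConfigShardUtil.isTransitionEnabledIgnoringFCV(st)) {
    ConfigShardUtil.transitionToDedicatedConfigServer(st);
    // Wait for the draining config shard's range deletions to finish (connection choice assumed).
    ConfigShardUtil.waitForRangeDeletions(st.configRS.getPrimary());
}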
diff --git a/jstests/libs/conn_pool_helpers.js b/jstests/libs/conn_pool_helpers.js
index 55977524f705d..9beb49da1d135 100644
--- a/jstests/libs/conn_pool_helpers.js
+++ b/jstests/libs/conn_pool_helpers.js
@@ -32,16 +32,17 @@ function launchFinds(mongos, threads, {times, readPref, shouldFail}) {
}
}
-function assertHasConnPoolStats(mongos, allHosts, args, checkNum) {
+function assertHasConnPoolStats(mongos, allHosts, args, checkNum, connPoolStatsCmd = undefined) {
checkNum++;
jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready = 0, pending = 0, active = 0, hosts = allHosts, isAbsent, checkStatsFunc} = args;
+ let {ready = 0, pending = 0, active = 0, hosts = allHosts, isAbsent, checkStatsFunc} = args;
checkStatsFunc = checkStatsFunc ? checkStatsFunc : function(stats) {
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
+ return stats.available == ready && stats.refreshing == pending &&
+ (stats.inUse + stats.leased) == active;
};
function checkStats(res, host) {
- var stats = res.hosts[host];
+ let stats = res.hosts[host];
if (!stats) {
jsTestLog("Connection stats for " + host + " are absent");
return isAbsent;
@@ -52,7 +53,8 @@ function assertHasConnPoolStats(mongos, allHosts, args, checkNum) {
}
function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
+ let cmdName = connPoolStatsCmd ? connPoolStatsCmd : "connPoolStats";
+ let res = mongos.adminCommand({[cmdName]: 1});
return hosts.map(host => checkStats(res, host)).every(x => x);
}
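With the change above, the 'active' expectation now covers both checked-out (inUse) and leased connections, and a fifth argument can name an alternative stats command to query instead of connPoolStats. A sketch call, where the counts and host list are placeholders:

// 'allHosts' is the list of host:port strings whose pools should be checked.
assertHasConnPoolStats(mongos, allHosts, {ready: 1, active: 2}, 0 /* checkNum */);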
diff --git a/jstests/libs/crl.pem b/jstests/libs/crl.pem
index c835a93e2ab5d..44fb2e5ff3e88 100644
--- a/jstests/libs/crl.pem
+++ b/jstests/libs/crl.pem
@@ -1,12 +1,12 @@
-----BEGIN X509 CRL-----
MIIBujCBozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI
TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv
-REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy
-MDIwMzIyMDAyMVoXDTI0MDUwNjIyMDAyMVowDQYJKoZIhvcNAQELBQADggEBAHuY
-cW87HAG5icgtgJ6X/z/uYf9oVQCCRU589Vr5HCETIobfkAWOqTM5yxO5Oa+o5G7C
-ZgrEPT2JkDx/Us4kYwY3J3rQSpCSJhMqIAHQIKIojzkyQI6PguyS2x7JU9uEp/Z2
-qeM72ogBsFFX6Ior4YczeC+KAmD2OJS0B2Zed6nSqfmbk2WZf1q4i9a6BgU+46Hb
-HBnfHQv8/utrmVQs2ibCnapBH4ihPz7ZZNRd+0cmv3C/P0rqJF7wGlcaseIZULyo
-7GcA494HcpN+nj1U6Cjh5nscXzn/2hvt3miZ+P32Y7SVzezqoIacKinf26V5qxq7
-fpW0No+7nMxkW0zdRKg=
+REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz
+MDYxNjE0MjkxNVoXDTI1MDkxNjE0MjkxNVowDQYJKoZIhvcNAQELBQADggEBAHfk
+rEkGCjmhkghxGHuUwBXpHkntgsKNZQnUAUjRp6DCJBA+OpSrFOGOuRvVZcV/8C/7
+IS5H0qf0qybnEAqvXU0bZD9e8+ipLa73879YbODzHkBiyL9fLg2BGJLXPvi9ETV1
+UspQf25h7Wh23MqHj9yiqo1aKazcOmyvxUnYsRxnzXQNIMJ4QfAOa4hZuOs+qmDf
+rEYqkrUNzmvrzVU2zvgTqT2fJyPUz/s2IDj7BJCXrmysGUPcPRftx45kmfjU9tdm
+4Po3EspAjuUoAcYvGzpz68c2Y8CbenrEfJoDjAzm8mRL57xcXz/xtRLhOYMhKvlh
+okBs7OL7fQCUxCC++Mc=
-----END X509 CRL-----
diff --git a/jstests/libs/crl.pem.digest.sha1 b/jstests/libs/crl.pem.digest.sha1
index 69b575b83e177..ddbb042f90a50 100644
--- a/jstests/libs/crl.pem.digest.sha1
+++ b/jstests/libs/crl.pem.digest.sha1
@@ -1 +1 @@
-2231880F51FB2A6A2E9B97AA5D7CE4FAD5DD3FC2
\ No newline at end of file
+BA3069FA4495A4594EE9FED9A482ACD54DD31EC7
\ No newline at end of file
diff --git a/jstests/libs/crl.pem.digest.sha256 b/jstests/libs/crl.pem.digest.sha256
index 91a9465bb2e7c..c5c2a80863e27 100644
--- a/jstests/libs/crl.pem.digest.sha256
+++ b/jstests/libs/crl.pem.digest.sha256
@@ -1 +1 @@
-2A185DD7786D83315C2CE2493BF1FB308686FC5C80D575D930C36AF2A0A6741F
\ No newline at end of file
+13E613C19075A68CAA7F40E479AFB2B8F0D59A3DFAB5171C16BB8298D37FB591
\ No newline at end of file
diff --git a/jstests/libs/crl_client_revoked.pem b/jstests/libs/crl_client_revoked.pem
index 17e3d19f57a4e..2eede2bdc4aa1 100644
--- a/jstests/libs/crl_client_revoked.pem
+++ b/jstests/libs/crl_client_revoked.pem
@@ -1,12 +1,12 @@
-----BEGIN X509 CRL-----
MIIB0DCBuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI
TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv
-REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy
-MDIwMzIyMDAyMVoXDTI0MDUwNjIyMDAyMVowFDASAgEEFw0yMjAyMDMyMjAwMjFa
-MA0GCSqGSIb3DQEBCwUAA4IBAQCOZeTs2vJh5Eg0u+0Dpebx6tzJXShp3+p8AbDr
-vrxkyDhKP4GuIHmBgJ12KaJUmBWtm4MucDxinWtNiTp3CeL7nuwmX7TGR8YQNL8/
-pDy0SfkT4AKe9V32OtySIFtMFhzcqNNFDu9H2p+Um3lywxoSyw+H64M3NL93IyeZ
-3Dy0q25fPxpiP1tz9y0Q1TVIIX/SWxHpaCdPfANRYVSTwD+gRM27u3dezJ4K3w7w
-wYGYXK019wIln4QouAm5mvKb3TxRlCn4ggM0npHmX9e/dr3acmlQ1QnoD8+Q8eER
-+9mbf/yyH2rw2XZN/V67Xgri3ctoT0tUpdh91xP63KLitu/v
+REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz
+MDYxNjE0MjkxNVoXDTI1MDkxNjE0MjkxNVowFDASAgEEFw0yMzA2MTYxNDI5MTVa
+MA0GCSqGSIb3DQEBCwUAA4IBAQBX1uEdyMk0u0n3wraQAXHME8WMDq5daV9FLysD
+OVovDObVN9XHuqTNH74ncpsxs+uq2x/E2QHu3wFpQ8SptuqQ7My9h6nXgtKfCrAS
+QpYzF/8ucPGlFaTdF4HbMdVWRkL1DpLBJYbmBzh5e9CKJB3QpI7htPFeL6B9zqCc
+JtDdFCwGdqlbnowdonCVV9p+ii8Mjr25wSnyPkhsDKUy1bEDnr5VxFVrIJ6JdVYv
+8oN7moeUPT3cOxmtfj8HologrL/A16/9px063yW0J4Acrz1ZanyyFGz/gLsWRlOc
+AMGdGrCI2Cqw8dDDCuCDYGUc4heJhkN02H8Wgjstn2l85evQ
-----END X509 CRL-----
diff --git a/jstests/libs/crl_client_revoked.pem.digest.sha1 b/jstests/libs/crl_client_revoked.pem.digest.sha1
index 31500641823c2..11f70f48fb3c4 100644
--- a/jstests/libs/crl_client_revoked.pem.digest.sha1
+++ b/jstests/libs/crl_client_revoked.pem.digest.sha1
@@ -1 +1 @@
-38C199A9F96DBBD3FE16E14BAC0C31D27F25AD3C
\ No newline at end of file
+0CA30BC9C4C5159DEBEB97E5C16B44EF8F761F24
\ No newline at end of file
diff --git a/jstests/libs/crl_client_revoked.pem.digest.sha256 b/jstests/libs/crl_client_revoked.pem.digest.sha256
index d5121c25a8b5e..b2bc5df045b07 100644
--- a/jstests/libs/crl_client_revoked.pem.digest.sha256
+++ b/jstests/libs/crl_client_revoked.pem.digest.sha256
@@ -1 +1 @@
-BE4CD9973F7EAD168485E79475BC615FA2D3156D1CFC87AA918C09372E714BFB
\ No newline at end of file
+A45552882279FAEF4079226CACA2E9934550ACFF0E6A2D45AD8D906335621433
\ No newline at end of file
diff --git a/jstests/libs/crl_expired.pem b/jstests/libs/crl_expired.pem
index fc72792780b93..08570617efbfa 100644
--- a/jstests/libs/crl_expired.pem
+++ b/jstests/libs/crl_expired.pem
@@ -1,12 +1,12 @@
-----BEGIN X509 CRL-----
MIIBujCBozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI
TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv
-REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy
-MDIwMzIyMDAyMVoXDTIyMDIwMzIyMDAyMlowDQYJKoZIhvcNAQELBQADggEBAKhw
-Ph7SF8TwsEk4K9L8WrEooJCMqm52SuSjqg23HdpnMxnGw8QyrYd8zXPBOOj+K4Oi
-QoVYUjH1tsEZsWdpP8ixFPPzKZUx2e/40XbMWKyUMeebCUHe3VPkchCzKIdrP26Z
-1ZcLPfr3qhJyr3Jy2Cs4z4ysNm0wRO5P0bgE8FhBhOXvyhLTvomvKpMSlaU4Wy8/
-O3GGUlPOwtZ3xgW6kzJibd+CqCKxCgPxB8dlY3/Bbx/ECGh/n/k9u2AE/rIwsx2G
-mO94LD/phdfN7gvJoDhwEtGKrQDC8NynNzF0NPPTMcO7lNP4ydcMmhyhXTUxsTaU
-/K6+UalKCs2thRGPCXg=
+REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz
+MDYxNjE0MjkxNVoXDTIzMDYxNjE0MjkxNlowDQYJKoZIhvcNAQELBQADggEBAE9w
+lGiE612zemJtKOdYx2OLQMQt7lsz4Sqvgwt3/NGcrCB+wTiAnb3e/fcGRnk7xwtA
+JN54PC4PgYbwUn48IEWEn3hqzjMHef62q3N5s6b1M/uVN9V+QXbkOi/f8Q71J9M/
+cLcj3gItc/r9nnxSAJwEDyk8kv5STrWfPN9fPDT73xqcch0GsHyOC+VuVycvjH9q
+CukTvdEVWqtfijjMlpeucnCrjc6OioWZBTrcAgbv9jQIHXg3AjwUvSXnnGh3S+li
+32pWZOA6mqEdWLmqq0Z74erypsvhHCb+MDSHt7tPxCwp65uBKVjzfSY6TK+bbPLN
+XnyoWkX1aKaNi5mmcKk=
-----END X509 CRL-----
diff --git a/jstests/libs/crl_expired.pem.digest.sha1 b/jstests/libs/crl_expired.pem.digest.sha1
index e73095bcc0b01..46cf7e611c34b 100644
--- a/jstests/libs/crl_expired.pem.digest.sha1
+++ b/jstests/libs/crl_expired.pem.digest.sha1
@@ -1 +1 @@
-DC31CEE7C62EFA0FBCB8C1FA0947C5F4A1DD0006
\ No newline at end of file
+6DCD7E3784AA03399ABB9227AFDC64541F13F00D
\ No newline at end of file
diff --git a/jstests/libs/crl_expired.pem.digest.sha256 b/jstests/libs/crl_expired.pem.digest.sha256
index 08d3c8f417ed0..919fd64492583 100644
--- a/jstests/libs/crl_expired.pem.digest.sha256
+++ b/jstests/libs/crl_expired.pem.digest.sha256
@@ -1 +1 @@
-FDCDEE458365A80D4B422A2350F5CE37223197A07C6905D44B40895D5921551C
\ No newline at end of file
+F4BC10F4A4D00603B6EA5D3FEA2B338E34CB910E320F002E215F4B786C664BFE
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ca-ocsp.crt b/jstests/libs/ecdsa-ca-ocsp.crt
index cb1ba34d06089..107d07f9de1f5 100644
--- a/jstests/libs/ecdsa-ca-ocsp.crt
+++ b/jstests/libs/ecdsa-ca-ocsp.crt
@@ -1,13 +1,14 @@
-----BEGIN CERTIFICATE-----
-MIIB9TCCAZygAwIBAgIEZKr7xjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIICFjCCAbugAwIBAgIEVEMOBzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE
AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
-AAS1km6C590H9n4iIbd7X1BxsXzCcgSIksEGGXDiAWKskZH9VCAwO6wOB0wssrk0
-bzTzw2yFj60wQqMux1qm6CPXoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC
-A0cAMEQCIFClJcZHoOYm/V5B63yLTTGToc1AsUgDA70OOPdH4V26AiArujh92W2L
-BIicxzY8674zDv4QbI+I7KO7ejyoh57ftg==
+AARJ0xOKeXlNeEj8uXEjyD9EzQX9UuxfvagpA5TqPHaeMCB7u69usoJS3Wjfn74n
+UzVYRtoN4NAmYQyMoaWtg+fNoy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBRg
+exXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEAgcN9+OxIxXxQ
+Q7EfAMhOYxVJMEEy8YTRqzLUxqD8SvECIQDR2En79y1wRmnyu7KqgaVf0mBJD1z8
+fXA5eXg8cYp1vA==
-----END CERTIFICATE-----
diff --git a/jstests/libs/ecdsa-ca-ocsp.key b/jstests/libs/ecdsa-ca-ocsp.key
index 3292d5b68899a..944ad4aa04763 100644
--- a/jstests/libs/ecdsa-ca-ocsp.key
+++ b/jstests/libs/ecdsa-ca-ocsp.key
@@ -1,5 +1,5 @@
-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtKR7JQBHBc471ZOg
-vjKYMWrNnl4poaypv0J4HXtf0a6hRANCAAS1km6C590H9n4iIbd7X1BxsXzCcgSI
-ksEGGXDiAWKskZH9VCAwO6wOB0wssrk0bzTzw2yFj60wQqMux1qm6CPX
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg3B+jo0PJFdvRJW5Z
+eItzbqoEvvLI1WWmRu/AxE/QhZmhRANCAARJ0xOKeXlNeEj8uXEjyD9EzQX9Uuxf
+vagpA5TqPHaeMCB7u69usoJS3Wjfn74nUzVYRtoN4NAmYQyMoaWtg+fN
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-ca-ocsp.pem b/jstests/libs/ecdsa-ca-ocsp.pem
index 53911568ca665..eb9029a96c9f4 100644
--- a/jstests/libs/ecdsa-ca-ocsp.pem
+++ b/jstests/libs/ecdsa-ca-ocsp.pem
@@ -1,18 +1,19 @@
-----BEGIN CERTIFICATE-----
-MIIB9TCCAZygAwIBAgIEZKr7xjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIICFjCCAbugAwIBAgIEVEMOBzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE
AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
-AAS1km6C590H9n4iIbd7X1BxsXzCcgSIksEGGXDiAWKskZH9VCAwO6wOB0wssrk0
-bzTzw2yFj60wQqMux1qm6CPXoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC
-A0cAMEQCIFClJcZHoOYm/V5B63yLTTGToc1AsUgDA70OOPdH4V26AiArujh92W2L
-BIicxzY8674zDv4QbI+I7KO7ejyoh57ftg==
+AARJ0xOKeXlNeEj8uXEjyD9EzQX9UuxfvagpA5TqPHaeMCB7u69usoJS3Wjfn74n
+UzVYRtoN4NAmYQyMoaWtg+fNoy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBRg
+exXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEAgcN9+OxIxXxQ
+Q7EfAMhOYxVJMEEy8YTRqzLUxqD8SvECIQDR2En79y1wRmnyu7KqgaVf0mBJD1z8
+fXA5eXg8cYp1vA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtKR7JQBHBc471ZOg
-vjKYMWrNnl4poaypv0J4HXtf0a6hRANCAAS1km6C590H9n4iIbd7X1BxsXzCcgSI
-ksEGGXDiAWKskZH9VCAwO6wOB0wssrk0bzTzw2yFj60wQqMux1qm6CPX
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg3B+jo0PJFdvRJW5Z
+eItzbqoEvvLI1WWmRu/AxE/QhZmhRANCAARJ0xOKeXlNeEj8uXEjyD9EzQX9Uuxf
+vagpA5TqPHaeMCB7u69usoJS3Wjfn74nUzVYRtoN4NAmYQyMoaWtg+fN
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1 b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1
index 25678664c4fef..0dd82c6fc9832 100644
--- a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1
+++ b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1
@@ -1 +1 @@
-BD9FC892A958AE5A5753EBDA314675D528B05156
\ No newline at end of file
+EB3A609AE91AE0373E050066CF9DCCC2992FB044
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256 b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256
index 98047d84bda96..56c7b038dddd9 100644
--- a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256
+++ b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256
@@ -1 +1 @@
-31ED10340FB5812D5720BF34902548EFB6ED81E5B5AA11CA3647CDF079E25124
\ No newline at end of file
+920526AD79D3CC2EC14E1C59ACB4BEE76BDB6803375C4785A457C3CB97B1B8F2
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ca.pem b/jstests/libs/ecdsa-ca.pem
index 0fbe387263f69..58cc22e890a96 100644
--- a/jstests/libs/ecdsa-ca.pem
+++ b/jstests/libs/ecdsa-ca.pem
@@ -4,23 +4,24 @@
# Root of ECDSA tree.
-----BEGIN CERTIFICATE-----
-MIIB9jCCAZygAwIBAgIEKUUq3TAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIICFDCCAbugAwIBAgIEKbnGsDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE
AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
-AAS6/8jcHeK3U6THarZbEWw7UJYKA45ahLNpOQxTni6CwpFjyaFKRqnv8OUK09v5
-58X4U4yGKpTJi6MB4U8c7+8zoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC
-A0gAMEUCIQDe1DKE44zvfcc1PYUqTqtPNtVb513kSitIizNsRhkhRAIgHcbGd1j4
-cZ91P8FlKg8NizbShq7hrbpni7urkmNoFXQ=
+AARFSZplmlIJErVVOGWJPbQ0Fv8cIrwaCkL31rQb3Dea0rCd+5vyFk7qOhX/OfZB
+bC7Q+9ZhDDfNZVDZXGl1omdooy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTm
+gV6I/ikYioQ9vv2qei0uiY5guTAKBggqhkjOPQQDAgNHADBEAiBJnsgI3P+AtpVh
+8oDDvN61dGJkCw/9VEfzV5Px8yxixwIgd96Cm9aZDsDijlhfbkxvgAaSm7CxOJI6
+gn2fotwvt3E=
-----END CERTIFICATE-----
-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEIIqvI0YJXBaQAGdgoSL6MLBR2LbmKGpZciebA61vPdiYoAoGCCqGSM49
-AwEHoUQDQgAEuv/I3B3it1Okx2q2WxFsO1CWCgOOWoSzaTkMU54ugsKRY8mhSkap
-7/DlCtPb+efF+FOMhiqUyYujAeFPHO/vMw==
+MHcCAQEEIIuWnsOUBRrBqZ2Ceb9HavbN8VNJX3mv7QpFoqD5csGZoAoGCCqGSM49
+AwEHoUQDQgAERUmaZZpSCRK1VThliT20NBb/HCK8GgpC99a0G9w3mtKwnfub8hZO
+6joV/zn2QWwu0PvWYQw3zWVQ2VxpdaJnaA==
-----END EC PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-ca.pem.digest.sha1 b/jstests/libs/ecdsa-ca.pem.digest.sha1
index 0f43c9996e023..8570461d56505 100644
--- a/jstests/libs/ecdsa-ca.pem.digest.sha1
+++ b/jstests/libs/ecdsa-ca.pem.digest.sha1
@@ -1 +1 @@
-BEAD07ADF2EC1A7D46C2094B72B03510101425E1
\ No newline at end of file
+2E1DAC1D8204F57CE6F22BE35E634F898E9E4D7D
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ca.pem.digest.sha256 b/jstests/libs/ecdsa-ca.pem.digest.sha256
index 06559f08adfa8..be1c2dc8dc2cc 100644
--- a/jstests/libs/ecdsa-ca.pem.digest.sha256
+++ b/jstests/libs/ecdsa-ca.pem.digest.sha256
@@ -1 +1 @@
-9965666BB50F095320BFEA5B57535FDEF82060F38155AE7287C2742A0A24E4A5
\ No newline at end of file
+91F2E4499375EA8F989DD56981FA945E8FBD23BF845DA3283BAF6FB37042CBF4
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-client.pem b/jstests/libs/ecdsa-client.pem
index bb07191190076..b522826073300 100644
--- a/jstests/libs/ecdsa-client.pem
+++ b/jstests/libs/ecdsa-client.pem
@@ -4,22 +4,22 @@
# Client certificate for ECDSA tree.
-----BEGIN CERTIFICATE-----
-MIIB1jCCAXsCBGp/B5IwCgYIKoZIzj0EAwIwejELMAkGA1UEBhMCVVMxETAPBgNV
+MIIB1jCCAXsCBA+/nyYwCgYIKoZIzj0EAwIwejELMAkGA1UEBhMCVVMxETAPBgNV
BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdN
b25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxHTAbBgNVBAMMFEtlcm5lbCBUZXN0IEVT
-Q0RBIENBMB4XDTIyMDIwMzIxNTk0OFoXDTI0MDUwNzIxNTk0OFowcDELMAkGA1UE
+Q0RBIENBMB4XDTIzMDYxNjE0Mjg0OFoXDTI1MDkxNzE0Mjg0OFowcDELMAkGA1UE
BhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5
MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYDVQQD
-DAZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASTbCS0Taz+os56KvG/
-xJGEq4P8rPQ/QWJLjS0t/132O9/XamI90yoHscLOI8AY7RUvnzCGIYOeQyxMuBbZ
-4LtYMAoGCCqGSM49BAMCA0kAMEYCIQDZGbOXz1ewIJ3yyVmxYpf7b3oOvtoGR3Hm
-MPrcRAK36AIhAKASzLijFUTbtuQXI6+IIE9XdLUXDQSsjJ5TfvTmehX8
+DAZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQcFnNCI6lDtSbHUJfK
+mSVs4MCvalE1xSc8uxPmcfpEfFhJoXEl7NyNxpytRA5Sv2dtrW/Y6EAOmIUrod7d
+7+BqMAoGCCqGSM49BAMCA0kAMEYCIQDyoR6LmZiL5gD7tgNIza3q7GEzZXnLacLe
+N+JWjYohRAIhAKYY6aNFJUCr4SR/Z8p5z+5/A2E5bNxr1OBuhu6aSPPB
-----END CERTIFICATE-----
-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEIDRLYogqXDUrJAEf4YxkZyDdcwdPev2538AArwQP/nfXoAoGCCqGSM49
-AwEHoUQDQgAEk2wktE2s/qLOeirxv8SRhKuD/Kz0P0FiS40tLf9d9jvf12piPdMq
-B7HCziPAGO0VL58whiGDnkMsTLgW2eC7WA==
+MHcCAQEEIK0mO6ZZ7h8qTWeJWGFYeO3E/JENne5OTfwwDZt6AYfaoAoGCCqGSM49
+AwEHoUQDQgAEHBZzQiOpQ7Umx1CXypklbODAr2pRNcUnPLsT5nH6RHxYSaFxJezc
+jcacrUQOUr9nba1v2OhADpiFK6He3e/gag==
-----END EC PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-client.pem.digest.sha1 b/jstests/libs/ecdsa-client.pem.digest.sha1
index d7d0154a6974d..062dae859f0bb 100644
--- a/jstests/libs/ecdsa-client.pem.digest.sha1
+++ b/jstests/libs/ecdsa-client.pem.digest.sha1
@@ -1 +1 @@
-5C7CDA84DF0FAE87B0BF77C897DA1F70E6DCD84B
\ No newline at end of file
+DF341DE8AACD9B18F225CC5706428672D9877FA5
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-client.pem.digest.sha256 b/jstests/libs/ecdsa-client.pem.digest.sha256
index 6dc88a01efb65..ca899d9f6c9bf 100644
--- a/jstests/libs/ecdsa-client.pem.digest.sha256
+++ b/jstests/libs/ecdsa-client.pem.digest.sha256
@@ -1 +1 @@
-9523EFD9443380AAA6C9A618369E40E3C6D7E011D40C307749B89A0A332A2077
\ No newline at end of file
+CA4DA0541E28DDB2ED59124FBE24213BD610AEAF4AA1661A2D4D821C24607AC5
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ocsp-responder.crt b/jstests/libs/ecdsa-ocsp-responder.crt
index 1e2b0c87f93be..1e9c469ec8e93 100644
--- a/jstests/libs/ecdsa-ocsp-responder.crt
+++ b/jstests/libs/ecdsa-ocsp-responder.crt
@@ -1,15 +1,16 @@
-----BEGIN CERTIFICATE-----
-MIICVjCCAfygAwIBAgIEEsVJKDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIICeDCCAh+gAwIBAgIEeHFXFDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE
-AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAbp6SVU9kZym6alv
-ePcUYyze4KtdvTSCqrlCvhPQMJXkraG3DQdaADBGHUAhp6V6qFF19j0uP/rmgYv4
-SV82l6N+MHwwCQYDVR0TBAIwADAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEw
-HQYDVR0OBBYEFNZtZQGDw+WGi0SIQeORd5GjEQ25MAsGA1UdDwQEAwIF4DAnBgNV
-HSUEIDAeBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMJMAoGCCqGSM49BAMC
-A0gAMEUCIQC4Uihq24KsC0qSx6OMaQyzVsbtuyL/sCvKtSAvfyG8wAIgEOdY/kVF
-7vfW+oZ5Rlo6i281FvIDJFCMOqViN8voUks=
+AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPH7mFDsURQ4W7lPd
+RCM8VdiRf42EzconV51yGoDpcZSPu4KQXdi++WkxT9p0idyOGI8/0cnS1UnFIaG8
+nRel46OBoDCBnTAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
+ATAdBgNVHQ4EFgQULQhUl0tD80eJzcBw9F3aBOMT8EwwCwYDVR0PBAQDAgXgMCcG
+A1UdJQQgMB4GCCsGAQUFBwMBBggrBgEFBQcDAgYIKwYBBQUHAwkwHwYDVR0jBBgw
+FoAUYHsV4MHvAhg0vQ0VRGmVIialeWwwCgYIKoZIzj0EAwIDRwAwRAIgA5qToMdE
+qMQE11sqxPxpwjvdxhdA0GrN8nDbGlo478cCIAZoUMvgFv/mtpi9VxEC5YMTNOq+
+iJpZyIwDck79Mss9
-----END CERTIFICATE-----
diff --git a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1 b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1
index cd03ff2284b56..8ebbeac1a43cd 100644
--- a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1
+++ b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1
@@ -1 +1 @@
-6B77279E773353A64975775754716390E0728404
\ No newline at end of file
+3C0AECADD99DE1FC5B7C5A81144C0ECB26BB3519
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256 b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256
index bf65889e5aaa6..98cfab83b8872 100644
--- a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256
+++ b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256
@@ -1 +1 @@
-4B61C7A3DE928F0D8F3E40B80856FD43762DB8285BB1434C5D6E167C145544E2
\ No newline at end of file
+6D5091B5E09F162903FA1E55B83D002BF8BE56A4D9B0732410191219B7A077BA
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-ocsp-responder.key b/jstests/libs/ecdsa-ocsp-responder.key
index fc07aeee8e612..09996b46b40d6 100644
--- a/jstests/libs/ecdsa-ocsp-responder.key
+++ b/jstests/libs/ecdsa-ocsp-responder.key
@@ -1,5 +1,5 @@
-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgX827/4CkJz/ML3eC
-NCGnNJ/3r3GMfIkDXP7bTF8AD32hRANCAAQBunpJVT2RnKbpqW949xRjLN7gq129
-NIKquUK+E9AwleStobcNB1oAMEYdQCGnpXqoUXX2PS4/+uaBi/hJXzaX
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgqSXSuRsmntIYB4B3
+Unvh261XDac/D7atXcTLfvxHXPmhRANCAAQ8fuYUOxRFDhbuU91EIzxV2JF/jYTN
+yidXnXIagOlxlI+7gpBd2L75aTFP2nSJ3I4Yjz/RydLVScUhobydF6Xj
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem
index e8c538081e05f..da951a1291f0c 100644
--- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem
+++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem
@@ -1,22 +1,23 @@
-----BEGIN CERTIFICATE-----
-MIICyjCCAnCgAwIBAgIEcvbiOTAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIIC7TCCApOgAwIBAgIEOdtbgzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE
-AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG1jvLcSrSPHYE2k9
-AYUo2AcBnxhQSlWjc6qGdskFPraSzpcAmgJe93mIeqoOkMhp5WTd2VKh1wNnJQ/z
-YMftz6OB8TCB7jAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
-ATAdBgNVHQ4EFgQUvihhabxAnZflf5IfG7fSRtq1JsowCwYDVR0PBAQDAgWgMB0G
-A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBnBggrBgEFBQcBAQRbMFkwLQYI
-KwYBBQUHMAGGIWh0dHA6Ly9sb2NhbGhvc3Q6OTAwMS9wb3dlci9sZXZlbDAoBggr
-BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czARBggrBgEFBQcB
-GAQFMAMCAQUwCgYIKoZIzj0EAwIDSAAwRQIhAPVGh3LTQEQnQTxd6Op1cxoK10GA
-Seev3F0madq2Upo0AiAib1IpBJbsUR6h9AAqQKqGUGKlMOvalRmb5fN9xjkl8A==
+AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3TkE9MiEGYhA5i5n
+k7uiDcNaXeHhAtI6bM9I3wB81nqAtPwl/fL59GRCeR1el0Wu7W0c6xF+DWKIlxlU
+zhwGYaOCARMwggEPMAkGA1UdEwQCMAAwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/
+AAABMB0GA1UdDgQWBBQ56EENV6yAt07fnIptwE4qIgFuMDALBgNVHQ8EBAMCBaAw
+HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGcGCCsGAQUFBwEBBFswWTAt
+BggrBgEFBQcwAYYhaHR0cDovL2xvY2FsaG9zdDo5MDAxL3Bvd2VyL2xldmVsMCgG
+CCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3RhdHVzMBEGCCsGAQUF
+BwEYBAUwAwIBBTAfBgNVHSMEGDAWgBRgexXgwe8CGDS9DRVEaZUiJqV5bDAKBggq
+hkjOPQQDAgNIADBFAiEAxHccgj+Ko4fg9oL9Mo+MpU6zU1/G8HQ2r5PuUPbE8OcC
+IAx08piNUhfVVt/zaZlfLX8gIXz8mmbmOBMbmMFjMx8N
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgh0QNI4XmNLg8gocs
-fl16geJLuecIxjpvW9MY2JZ3vmWhRANCAAQbWO8txKtI8dgTaT0BhSjYBwGfGFBK
-VaNzqoZ2yQU+tpLOlwCaAl73eYh6qg6QyGnlZN3ZUqHXA2clD/Ngx+3P
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgVRzSZON1ISgXftxt
+H8VyOuD6jTdxbnO203MqWdPFid2hRANCAATdOQT0yIQZiEDmLmeTu6INw1pd4eEC
+0jpsz0jfAHzWeoC0/CX98vn0ZEJ5HV6XRa7tbRzrEX4NYoiXGVTOHAZh
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1 b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1
index a83437d1a90cb..9ac8ecfad77ab 100644
--- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1
+++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1
@@ -1 +1 @@
-8405431F55B0E4B422C0421B7FADC3160C72A494
\ No newline at end of file
+52575E280DBCD17BE2BB16F99041B8B9B90FAD21
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256 b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256
index 574e330333d61..ea4da518fb056 100644
--- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256
+++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256
@@ -1 +1 @@
-27893987C39D98357BBC3758EDBB7157E78E000BD373128ED38346E7F176C90F
\ No newline at end of file
+6EC581765A649E6D2D8E283E1181A7E32327FA9E9C68159E280D6675A4EE6422
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-server-ocsp.pem b/jstests/libs/ecdsa-server-ocsp.pem
index 2fe18cc70f51e..065a27ec0b1bf 100644
--- a/jstests/libs/ecdsa-server-ocsp.pem
+++ b/jstests/libs/ecdsa-server-ocsp.pem
@@ -1,22 +1,23 @@
-----BEGIN CERTIFICATE-----
-MIICtzCCAl2gAwIBAgIEQVAUIjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIIC2TCCAn6gAwIBAgIEc3mdQDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE
-AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEJJWlKXhZYhkGm7KD
-/4s5zG3FTZqxCkbF7LVTKUyEWU2oVE/X555ANKo7F4sNTh4kJYH9R9BNWm/ckr9F
-F4lGRKOB3jCB2zAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
-ATAdBgNVHQ4EFgQUBA+bPOsot328qMyJD1GNNsM5MpEwCwYDVR0PBAQDAgWgMB0G
+AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGj966QCvYqydgxz5
+fX5JJZRb3UwGZiczFwBj0iTrpI6c1ISOCzWrbq3v0UQUlW38gE11/+M7iPfJ8z+K
+HLra16OB/zCB/DAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
+ATAdBgNVHQ4EFgQUsbX4B4hzBcFM7ZQj66tB4VZZRG0wCwYDVR0PBAQDAgWgMB0G
A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBnBggrBgEFBQcBAQRbMFkwLQYI
KwYBBQUHMAGGIWh0dHA6Ly9sb2NhbGhvc3Q6OTAwMS9wb3dlci9sZXZlbDAoBggr
-BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAKBggqhkjOPQQD
-AgNIADBFAiEA94hM6CSWcr65vB6E2+WaFe0MLzYm+tEWPlLedT4BQekCIDRk8Ww+
-jUPFWz7Pz2lOqmH/FZWxVn1GEbeDHZZljk3P
+BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAfBgNVHSMEGDAW
+gBRgexXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEA+W11lMP6
+iSS+01h1eAHPQzutZIdZ76rTCnMt0W82YxICIQC/UHjCg/YYEB1b+g+A7GR4TWjq
+47Ex/m6/+Vs6918z2Q==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgOEISSH6wYpjSExnT
-fpQW8Dan60vNlFYIL1Q2xFPFzluhRANCAAQklaUpeFliGQabsoP/iznMbcVNmrEK
-RsXstVMpTIRZTahUT9fnnkA0qjsXiw1OHiQlgf1H0E1ab9ySv0UXiUZE
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgSoduw+gdLkgClffE
+IFmoF0pj9aGdT1q8RcI05flCQzahRANCAAQaP3rpAK9irJ2DHPl9fkkllFvdTAZm
+JzMXAGPSJOukjpzUhI4LNature/RRBSVbfyATXX/4zuI98nzP4ocutrX
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1 b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1
index a2ce1449f8009..f38fb8b9960a5 100644
--- a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1
+++ b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1
@@ -1 +1 @@
-2D09B949B8BA53416B40B64C193F1962E5BE3A0D
\ No newline at end of file
+6254B29BE13CB7F86C8A93C029B89D76DB705525
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256 b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256
index 048ddee7588e9..9fe4092b9b369 100644
--- a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256
+++ b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256
@@ -1 +1 @@
-98BC40243CB5D19B7DEB912AF02598F668DE83B2CD77EFCAA69E3C5A07E26B01
\ No newline at end of file
+FD73A26040F9EB153BC74560125F89547F88CD83753FE2904E81F18C49A0D6C7
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-server.pem b/jstests/libs/ecdsa-server.pem
index 9a5dfadfc7b10..194b98cf5a0cb 100644
--- a/jstests/libs/ecdsa-server.pem
+++ b/jstests/libs/ecdsa-server.pem
@@ -4,25 +4,26 @@
# Server certificate for ECDSA tree.
-----BEGIN CERTIFICATE-----
-MIICTTCCAfKgAwIBAgIEVfZqODAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
+MIICcDCCAhWgAwIBAgIERpwkVDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl
-c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw
+c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw
CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr
IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE
-AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEkIPr8LJPhlw1aQyN
-6ZTN5zvdOVMBhKR66DFcyLsSjZTwdaYgrTdoHuRg4cY/mDfOjykn93QaXXiYELWO
-FLGxFKN0MHIwCQYDVR0TBAIwADAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEw
-HQYDVR0OBBYEFIQvX1T4/vSJNRIFuL1wFsP3e9SEMAsGA1UdDwQEAwIFoDAdBgNV
-HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwCgYIKoZIzj0EAwIDSQAwRgIhAKWy
-vhMAorPe5oCXXM5bCHIyoq8lis9e8jU3ewBTnZ6+AiEA2hx0vQXogjX1T8zqn/2K
-XcOWOCaP5UWWsZU9F3tOtJg=
+AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbcTv1kEFjReoKnQr
+kt6NUKFKouB/Fq8czR9xGNsEflTiqzC1gUnWXWCSZAGqsdNml4XnviRxHxu+FZsg
+MciDD6OBljCBkzAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
+ATAdBgNVHQ4EFgQUyteUn3L8KvqutJkC1FiSPa5DHbYwCwYDVR0PBAQDAgWgMB0G
+A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAfBgNVHSMEGDAWgBTmgV6I/ikY
+ioQ9vv2qei0uiY5guTAKBggqhkjOPQQDAgNJADBGAiEAriAXYcaueuIJitGKcuz1
+akx+Cp6KPL62V6hxCytuyqECIQC4SrxeMhrkvrfYE+HwSpSoM4adoeubvWO/Trcq
+f/3c/g==
-----END CERTIFICATE-----
-----BEGIN EC PARAMETERS-----
BggqhkjOPQMBBw==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
-MHcCAQEEIFpM+Xqp9Ga+tlNOh4qhoqooGCAI4dTgPn7bzOVjy/B3oAoGCCqGSM49
-AwEHoUQDQgAEkIPr8LJPhlw1aQyN6ZTN5zvdOVMBhKR66DFcyLsSjZTwdaYgrTdo
-HuRg4cY/mDfOjykn93QaXXiYELWOFLGxFA==
+MHcCAQEEIJrVwEvvtjj6vLrCDXK00GtzS3/sDyMNnr90GR7rdv9FoAoGCCqGSM49
+AwEHoUQDQgAEbcTv1kEFjReoKnQrkt6NUKFKouB/Fq8czR9xGNsEflTiqzC1gUnW
+XWCSZAGqsdNml4XnviRxHxu+FZsgMciDDw==
-----END EC PRIVATE KEY-----
diff --git a/jstests/libs/ecdsa-server.pem.digest.sha1 b/jstests/libs/ecdsa-server.pem.digest.sha1
index c67f3477ce1d3..6c2b33a4d78b9 100644
--- a/jstests/libs/ecdsa-server.pem.digest.sha1
+++ b/jstests/libs/ecdsa-server.pem.digest.sha1
@@ -1 +1 @@
-3124F81F8BE74AFE7FB3EEC1CFA17BDF48A97A68
\ No newline at end of file
+CD4C1D75223860723FA2C288418E9CB6F28C740D
\ No newline at end of file
diff --git a/jstests/libs/ecdsa-server.pem.digest.sha256 b/jstests/libs/ecdsa-server.pem.digest.sha256
index ea1854935ae70..0ae974bb709f0 100644
--- a/jstests/libs/ecdsa-server.pem.digest.sha256
+++ b/jstests/libs/ecdsa-server.pem.digest.sha256
@@ -1 +1 @@
-6AE713FACA4950A79F38272F40B72DF973206468DD88B6F528C7C2C236AE03B4
\ No newline at end of file
+BBD9CEDC767FDB365178E0B4FC96ED56F91C8CBA2FA5127B93C8DE00AAFDE53D
\ No newline at end of file
diff --git a/jstests/libs/expired.pem b/jstests/libs/expired.pem
index 4106ff57118cc..9349fddc0e1c5 100644
--- a/jstests/libs/expired.pem
+++ b/jstests/libs/expired.pem
@@ -3,52 +3,52 @@
#
# A certificate which has passed its expiration date.
-----BEGIN CERTIFICATE-----
-MIIDkjCCAnqgAwIBAgIES0ioJDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDkjCCAnqgAwIBAgIENQFz0TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA0WhcNMjIwMTIzMDgxMzA0WjBtMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MTU5WhcNMjMwNjA1MDA0MTU5WjBtMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEQMA4GA1UEAwwH
-ZXhwaXJlZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMOX7aXgIpMi
-1lLqNmsqmjg6N+hg0Mf63N7ZCPqwOSaGUo4lqfgR27RdQR9SI5JIDpFu0XUWpDQi
-+w+eH7i45VQJ3DE62bB1dW1WGnZOWOTH6uEfEX/kGjJBYjUInliERyvqINiyeAA+
-deqKIjzlny8APqzSgr2vnffKRxLzjN2JV4CLWPmXzQ0p8E6DmXgNHO3gcEviwqDg
-OP6PEdWAAbPQlutk4P34LYdnvDoQMtaTd8s7wQYxoU7nXy+iRh1ZOQB6Dj1nMCdX
-9sblA0cO1zVNRAJmlQBVlVmB0pfntRvs5OWywoXN7uVy9WI3dC4qU8k2APsnnWur
-Y1PYAGalCjUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMw
-EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQDetGkeZ2NsmsRm
-y64CYd9bq1QRi72Px0lnL/uVwX6v/LQBX/2TnkFIXJ/ICW0FyjZppsd4inLoXEH3
-guOfTrWlkD8OPNZmdNzMZf7i1JBtzKQVSyoX3J8zllPiYdfmWwUuqd2GnBwebzo7
-Sz5zVPhb0eQR+kdWT0tA3yzs9Ox+6MyDeWsHN4aBEDfezi170t2Ax/HVU86HA58I
-V8wPeXtRtN97bQYcOlGYElRA0VKygyAC2nVkxl1PjSKU7SEYTeHVp0qYCy2NhSA6
-n1DI3fz+ZccmAfKAxqy9ggvHmgnhRFoAtFeMQmAWB74/do2ZHCx3LE/EAv7sDXTk
-2SqCkSng
+ZXhwaXJlZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOpiZhK3zhWC
+Rmo714SuRmGvB+vsGtkc8UIBwKoA7PrcvveyQtBobG1jgKZAwLVGb7MAfzgvZB8d
+QWJpoHfjHMCKxW8PEvOi9RgpiMfAfMDVGLCSliGQ4TlP3PQCQHbVi2rjlYyBIL5D
+/JubBM6XpZrXQSXFNTzrOyWuMrqtgAICfoSTZGQXj3v5sqcVjb1+pN0IeuHFiTP6
+UPZ5JLGy5VxU2I3XZ2yxh/iBJWNSiL/38TGJRb5wXEQ7hxrU8guYQvtIqmMxCOuJ
+6qOMrPwsgQxwLfyICC6V/hCQJqo9aJAz+F6vrQ0DHe3stx9gVe9NvRXPQg9jnFQ+
+fJbYYSkiymMCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMw
+EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAUrnrVrJn7Usuc
+IKdK7FFJ26PAkxeRUPZIluwztdW/mQYMFctnaGgxkUr+iRQAfk8mwUikpsp2PpUL
+oj+MvZwxaCzgRILdP5soMW+hfr9tpQRXHsMKStXxPLp9xaWTcRuVpuKO/cM1kBG0
+XiAupsf4chXrsWy2lJhUMTzbMcB2hhvnW9d5J8ducXb34WHB9E0zsvtVQ4t4zMoB
+ulNzuUUu+A1Nh6Xb/SQxAmPRrwRV4DCE37Gp05EIPHiaatG5S8/oQoIqIXsweyQC
+oJo2mD4a+cNsKRqzYldtJEjrkMVDZ/tH4XCUaFFlCvXTh16Y5zvWj5eynkfCnOVe
+T7xIog+S
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDDl+2l4CKTItZS
-6jZrKpo4OjfoYNDH+tze2Qj6sDkmhlKOJan4Edu0XUEfUiOSSA6RbtF1FqQ0IvsP
-nh+4uOVUCdwxOtmwdXVtVhp2Tljkx+rhHxF/5BoyQWI1CJ5YhEcr6iDYsngAPnXq
-iiI85Z8vAD6s0oK9r533ykcS84zdiVeAi1j5l80NKfBOg5l4DRzt4HBL4sKg4Dj+
-jxHVgAGz0JbrZOD9+C2HZ7w6EDLWk3fLO8EGMaFO518vokYdWTkAeg49ZzAnV/bG
-5QNHDtc1TUQCZpUAVZVZgdKX57Ub7OTlssKFze7lcvViN3QuKlPJNgD7J51rq2NT
-2ABmpQo1AgMBAAECggEAa1SkoUCkWG9mgutpX1dqd25jHY57IzIjH9qjVcV8JwrQ
-pJZAdhN+p7QAt2pAgNYvwFi6cfExvDZx5LkVmS1FEt+ySAWOTYm3HX+BV+hYWXrn
-BhAhbzfBtFAyzv1ivxofYBbDXYZZ2XBtDn4smZQ7zPPqcLW37tU+7ym95MVnSG1O
-c383X+hnsLyMo10lQdnD+0GLGVoVtOEQkX3vDM4ckejwsn3GUsnsasXl4pwESG/l
-e/lSqaAE5zv0jJ4Vd+pvmFA/QxnnbRzWFr5YAfZTq84VG7Ila9pAxQKsx+5hVzKH
-eMS+BrjkG3ZjJihRAP1BYl8+uZLoa9fAEb/K5aGkBQKBgQDshRWLe1OVr4GrwGmA
-M828l+ASvQgZr8iZfFC5sbB8CeDz5/6IFLXSLc3oAcUAdJzEq7jMLaqwvyNy4aeT
-vOYS0K94njk3l+mGtY+H1NYdzK9D306zSjIeUqA9efUCEm/4v323IpMVCF4s5MMU
-gLVQZ8hNIJKjjx+Nm1AMTWl0AwKBgQDTs+3w+kraf3JfG4EBYI6w5vNCBlJHDni+
-ZEOmmd5cgxR/sEZJiAbfsNs4pnHqKboQxFU+lgH2tcktkcN4rpFvN2bt865l2XCX
-3NqR8gJbN3kroAi7m0ah9p6ZfZGYT3S3nsjjeKZaz7dkZNtrP/zxrC6IbsamAEeO
-NnFa4rkfZwKBgQCN3UFP+TfoR23io7VkBS542Su4cZODPLF4hl0xFEhEDfTUtykv
-XCS2nWyspSsRm+BQAqQEK7v+6ZaMmJbYAWyKiotFMLonA0I533rowRtwok3Zyv48
-gdtP3sVoOldf18k7jTgNeXp5GhzKlqgACzc1tBorUMdDvVvypt67kk8XGwKBgGw8
-qyOwpEKwNGaB5mQw3ON/QNH572ka8grNsnTkaHa+IeXi1xzTKkiF4J9HIijJqrLV
-3ouAIILlkHT2+IykHAFTvxFbrEewc0uBhFJn/GJsq0vtp2lbyzIVqCLru1u3DAKx
-cR4z2kHFv4rZsgFUltts3+GrWW8X533DQhNopaNzAoGAbekygXWWCXX6WeGhvshe
-JTWf96Blmy7ZLDlOy7D7Ns3536ALcl/tPtL2wOqmz/9rBTHKX/v7k+yaaysI3vn4
-+79/ig8mSqEA1nHgA3JIILvV3C3r8RKoGsrw/6LZDynANrcHiiR+aHlrdaHsh2o1
-0FQr/GIVnWR/Z6gaB39T4xM=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqYmYSt84VgkZq
+O9eErkZhrwfr7BrZHPFCAcCqAOz63L73skLQaGxtY4CmQMC1Rm+zAH84L2QfHUFi
+aaB34xzAisVvDxLzovUYKYjHwHzA1RiwkpYhkOE5T9z0AkB21Ytq45WMgSC+Q/yb
+mwTOl6Wa10ElxTU86zslrjK6rYACAn6Ek2RkF497+bKnFY29fqTdCHrhxYkz+lD2
+eSSxsuVcVNiN12dssYf4gSVjUoi/9/ExiUW+cFxEO4ca1PILmEL7SKpjMQjrieqj
+jKz8LIEMcC38iAgulf4QkCaqPWiQM/her60NAx3t7LcfYFXvTb0Vz0IPY5xUPnyW
+2GEpIspjAgMBAAECggEAHXON1e5ItAUxqempFB/98gpXWXrKiNuNKTklJ3eET6Vr
+TZ6VrLBiZ4BoSvu5+01Ffd2jHsE9uHcpnx8crdPY6gzF8EWZoQmp+IZHjKoQQv7D
+4nQY71DQLC5v2i1qQkRlB2JfiU80eRP76uWYvgjqI0HHCuLz5Jq+TObNsw1YF2hR
+RCePiXwuIt/avNnZgb9WOdqSwd24jF6TCmkSMGKPUuJ0cjWHkLa1sLupxePiEC2h
+XmGtLC5k4lHUhom6CVmIV/Hpu7mooWDWTLRxSOdyAKI73s2zQb+c7VoLU9ShN6HR
+N+RqFzXUDL3SfZdbEokU1mr0WJyvY+E6obgUlqFGCQKBgQD1SDcvQd6vHmxn5+2E
+DXdfISHc/KKszkv0I9QHGktv8qAS5bGJadLWVmBpWmhxjN3M6hfiGTmt61c7NFNm
+haiXGDSlfISZVuch29Ze5AcgUGE1tXOaWHcHp+hj5uWm7glFMjW3ZpmjXoe20oMi
+j6uFFZAzz1SjGK1HP+nqbvYqhQKBgQD0oEdTqNY/7LUAfUvWt74oMhFveAGtq786
+4GHDTRVWGDwFRVrQqtrDrlWBE7ZVH2cQid/C5lqdicTnUrYgRXHHgiQ5sp8+dlfA
+Lu2rx3vtLrMHyfAGRbITUhZQxYUsONfzkEbKi1aB+OBIyCjAis744RKb3TuFWsJX
+Oc/lV9nZxwKBgQDuRp4dUV7zPZnQArVwb+iDyFruI5ogRzPv0pJZ6Ahakxc/5FTo
+iZ3gy/5Suhn2HQMm2k4jPaATvSh7giR21ublCQWzsVfa4locQURp4BiwIoWcuE26
+JbBUBqAx1I9J77mft9wI8ynTU7SBPNVZULotybnRKtssqX9DVOp27B5FRQKBgG1k
+e2ELAHxnou2MGmzzkAKzmR5q4P1D72kpzC4k+2Sbw8Nrp1fm/5tAC5aENSZNu64m
+qbyjfGQ0pqv3TeCSjXX1WbO/j/zDuSwFEF2gAGgpWQAjqsVzR1XigH4wRgRnixKt
+MysCMnY+0DVb5PVtXW7oX4T54tMYqg2Nmc1qgs4jAoGBALizxO2B+2riBZ/7iuOL
+WOdrCEPZFq5+q6lJVULu5SeSsbaTbng9jdYS+XYgRTj0uFz2GZtNFF9SzKBpnQeE
+GiYokGidq0PduPEnq9d1KzF9gKCBprxa5jxogkSqTzbcecMwuXE904rdr+JXwStL
+d8GuJjHcCJoZW9lGvVU/5YU+
-----END PRIVATE KEY-----
diff --git a/jstests/libs/expired.pem.digest.sha1 b/jstests/libs/expired.pem.digest.sha1
index 4b8f78e141f25..fcebeaa05ee9f 100644
--- a/jstests/libs/expired.pem.digest.sha1
+++ b/jstests/libs/expired.pem.digest.sha1
@@ -1 +1 @@
-5325BDD0B9A3417A1C07B947C83168A1C189D1E4
\ No newline at end of file
+8650F73BE93E97609540AD60B055C78721CDA0D4
\ No newline at end of file
diff --git a/jstests/libs/expired.pem.digest.sha256 b/jstests/libs/expired.pem.digest.sha256
index e38411a855f89..838b2a6583de5 100644
--- a/jstests/libs/expired.pem.digest.sha256
+++ b/jstests/libs/expired.pem.digest.sha256
@@ -1 +1 @@
-7B1A1353749D953FB0A2A2FA036B2C44BAFB3AE1EFCFA9D3A9DB50ADDCA337C7
\ No newline at end of file
+D815E7BA03F1220118893FFAEFD21C32C487EACB660E21AE9D570B0F9D025FA8
\ No newline at end of file
diff --git a/jstests/libs/fail_point_util.js b/jstests/libs/fail_point_util.js
index 1c3a698325b22..008c7c40b4620 100644
--- a/jstests/libs/fail_point_util.js
+++ b/jstests/libs/fail_point_util.js
@@ -3,6 +3,7 @@
*/
var configureFailPoint;
+var configureFailPointForRS;
var kDefaultWaitForFailPointTimeout;
(function() {
@@ -15,8 +16,10 @@ if (configureFailPoint) {
kDefaultWaitForFailPointTimeout = 5 * 60 * 1000;
configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "alwaysOn") {
- const res = assert.commandWorked(
- conn.adminCommand({configureFailPoint: failPointName, mode: failPointMode, data: data}));
+ const res = sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand(
+ {configureFailPoint: failPointName, mode: failPointMode, data: data});
+ }, "Timed out enabling fail point " + failPointName);
return {
conn: conn,
@@ -26,11 +29,13 @@ configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "a
// Can only be called once because this function does not keep track of the
// number of times the fail point is entered between the time it returns
// and the next time it gets called.
- assert.commandWorked(conn.adminCommand({
- waitForFailPoint: failPointName,
- timesEntered: this.timesEntered + timesEntered,
- maxTimeMS: maxTimeMS
- }));
+ sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand({
+ waitForFailPoint: failPointName,
+ timesEntered: this.timesEntered + timesEntered,
+ maxTimeMS: maxTimeMS
+ });
+ }, "Timed out waiting for failpoint " + failPointName);
},
waitWithTimeout: function(timeoutMS) {
// This function has three possible outcomes:
@@ -38,17 +43,40 @@ configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "a
// 1) Returns true when the failpoint was hit.
// 2) Returns false when the command returned a `MaxTimeMSExpired` response.
// 3) Otherwise, this throws for an unexpected error.
- let res = assert.commandWorkedOrFailedWithCode(conn.adminCommand({
- waitForFailPoint: failPointName,
- timesEntered: this.timesEntered + 1,
- maxTimeMS: timeoutMS
- }),
- ErrorCodes.MaxTimeMSExpired);
- return res["ok"] === 1;
+ let res = sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand({
+ waitForFailPoint: failPointName,
+ timesEntered: this.timesEntered + 1,
+ maxTimeMS: timeoutMS
+ });
+ }, "Timed out waiting for failpoint " + failPointName, [ErrorCodes.MaxTimeMSExpired]);
+ return res !== undefined && res["ok"] === 1;
},
off: function() {
- assert.commandWorked(
- conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand({configureFailPoint: failPointName, mode: "off"});
+ }, "Timed out disabling fail point " + failPointName);
+ }
+ };
+};
+
+configureFailPointForRS = function(conns, failPointName, data = {}, failPointMode = "alwaysOn") {
+ conns.forEach((conn) => {
+ sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand(
+ {configureFailPoint: failPointName, mode: failPointMode, data: data});
+ }, "Timed out setting failpoint " + failPointName);
+ });
+
+ return {
+ conns: conns,
+ failPointName: failPointName,
+ off: function() {
+ conns.forEach((conn) => {
+ sh.assertRetryableCommandWorkedOrFailedWithCodes(() => {
+ return conn.adminCommand({configureFailPoint: failPointName, mode: "off"});
+ }, "Timed out disabling fail point " + failPointName);
+ });
}
};
};
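
For orientation, here is a minimal usage sketch of the helpers above; the replica set fixture and the fail point name "hangAfterCollectionInserts" are illustrative assumptions, not part of this change. configureFailPoint targets a single connection and returns a handle exposing wait(), waitWithTimeout() and off(), all of which now go through sh.assertRetryableCommandWorkedOrFailedWithCodes(); the new configureFailPointForRS enables the same fail point on every supplied connection and its off() disables it on each one.

load("jstests/libs/fail_point_util.js");

// Illustrative fixture: a two-node replica set.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

// Enable a fail point on the primary only; the underlying commands are retried internally.
const fp = configureFailPoint(rst.getPrimary(), "hangAfterCollectionInserts");

// Enable the same fail point on every node with the new replica-set helper.
const fpAll = configureFailPointForRS(rst.nodes, "hangAfterCollectionInserts");

// ... run the workload that is expected to trip the fail point, then:
fp.wait();      // blocks until the primary has entered the fail point
fp.off();       // disables it on the primary
fpAll.off();    // disables it on every node
rst.stopSet();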
diff --git a/jstests/libs/feature_flag_util.js b/jstests/libs/feature_flag_util.js
index 12403b8fed8c5..4e75a65298aa9 100644
--- a/jstests/libs/feature_flag_util.js
+++ b/jstests/libs/feature_flag_util.js
@@ -1,11 +1,9 @@
-"use strict";
-
load("jstests/libs/fixture_helpers.js");
/**
* Utilities for feature flags.
*/
-var FeatureFlagUtil = (function() {
+export var FeatureFlagUtil = (function() {
// A JS attempt at an enum.
const FlagStatus = {
kEnabled: 'kEnabled',
@@ -51,7 +49,7 @@ var FeatureFlagUtil = (function() {
} else {
// Some db-like objects (e.g ShardedClusterFixture) have a getSiblingDB method
// instead of getDB, use that here to avoid an undefined error.
- setConn(db.getSiblingDB(db.defaultDB));
+ setConn(db.getSiblingDB(db.getMongo().defaultDB));
}
}
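
Since FeatureFlagUtil is now an ES module export rather than a load()-time global, module-style tests import it explicitly. A small sketch; the flag name is hypothetical and the isEnabled helper is assumed from the rest of feature_flag_util.js (it is not visible in the hunk above).

import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";

// Hypothetical flag name; isEnabled is assumed from the rest of the utility.
if (FeatureFlagUtil.isEnabled(db, "ExampleFeature")) {
    jsTestLog("ExampleFeature is enabled, running the extended assertions");
}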
diff --git a/jstests/libs/fsm_serial_client.js b/jstests/libs/fsm_serial_client.js
index 6d6c0a18f4e49..97fc766b5fce8 100644
--- a/jstests/libs/fsm_serial_client.js
+++ b/jstests/libs/fsm_serial_client.js
@@ -15,13 +15,13 @@ var denylist = workloadDenylist.map(function(file) {
return workloadDir + '/' + file;
});
-runWorkloadsSerially(workloadList.filter(function(file) {
+await runWorkloadsSerially(workloadList.filter(function(file) {
return !Array.contains(denylist, file);
}),
- {},
- {dbNamePrefix: dbNamePrefix},
- {
- keepExistingDatabases: true,
- dropDatabaseDenylist: fsmDbDenylist,
- validateCollections: validateCollectionsOnCleanup
- });
+ {},
+ {dbNamePrefix: dbNamePrefix},
+ {
+ keepExistingDatabases: true,
+ dropDatabaseDenylist: fsmDbDenylist,
+ validateCollections: validateCollectionsOnCleanup
+ });
diff --git a/jstests/libs/ftdc.js b/jstests/libs/ftdc.js
index 105769c5c93c0..b0326c1f14b7b 100644
--- a/jstests/libs/ftdc.js
+++ b/jstests/libs/ftdc.js
@@ -19,7 +19,7 @@ function setParameter(adminDb, obj) {
/**
* Verify that getDiagnosticData is working correctly.
*/
-function verifyGetDiagnosticData(adminDb) {
+function verifyGetDiagnosticData(adminDb, logData = true) {
// We need to retry a few times if we run this test immediately after mongod is started, as FTDC may
// not have run yet.
var foundGoodDocument = false;
@@ -42,8 +42,9 @@ function verifyGetDiagnosticData(adminDb) {
assert(data.hasOwnProperty("end"), "does not have 'end' in '" + tojson(data) + "'");
foundGoodDocument = true;
-
- jsTestLog("Got good getDiagnosticData: " + tojson(result));
+ if (logData) {
+ jsTestLog("Got good getDiagnosticData: " + tojson(result));
+ }
}
}
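
The added logData flag simply lets callers silence the per-call dump of the diagnostic document. A usage sketch, assuming a standalone mongod started through MongoRunner:

load("jstests/libs/ftdc.js");

const conn = MongoRunner.runMongod({});
const adminDb = conn.getDB("admin");

// Default behaviour: logs the full getDiagnosticData result once a good document is found.
verifyGetDiagnosticData(adminDb);

// Quiet variant, useful when polling FTDC repeatedly in a loop.
verifyGetDiagnosticData(adminDb, /*logData=*/ false);

MongoRunner.stopMongod(conn);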
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index c45cec7e813fb..e6f8d24ef30b8 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -1,6 +1,6 @@
GeoNearRandomTest = function(name, dbToUse) {
this.name = name;
- this.db = (dbToUse || db);
+ this.db = (dbToUse || globalThis.db);
this.t = this.db[name];
this.reset();
print("Starting getNear test: " + name);
diff --git a/jstests/libs/golden_test.js b/jstests/libs/golden_test.js
index f3e218f17954f..b9ef72a0e0c8b 100644
--- a/jstests/libs/golden_test.js
+++ b/jstests/libs/golden_test.js
@@ -1,5 +1,4 @@
-
-function tojsonOnelineSortKeys(x) {
+export function tojsonOnelineSortKeys(x) {
let indent = " ";
let nolint = true;
let depth = undefined;
@@ -11,23 +10,10 @@ function tojsonOnelineSortKeys(x) {
// - Discards the field ordering, by recursively sorting the fields of each object.
// - Discards the result-set ordering by sorting the array of normalized documents.
// Returns a string.
-function normalize(result) {
+export function normalize(result) {
return result.map(d => tojsonOnelineSortKeys(d)).sort().join('\n') + '\n';
}
-// Override print to output to both stdout and the golden file.
-// This affects everything that uses print: printjson, jsTestLog, etc.
-print = (() => {
- const original = print;
- return function print(...args) {
- // Imitate GlobalInfo::Functions::print::call.
- const str = args.map(a => a == null ? '[unknown type]' : a).join(' ');
- _writeGoldenData(str);
-
- return original(...args);
- };
-})();
-
// Takes an array or cursor, and prints a normalized version of it.
//
// Normalizing means ignoring:
@@ -35,7 +21,7 @@ print = (() => {
// - order of documents in the array/cursor.
//
// If running the query fails, this catches and prints the exception.
-function show(cursorOrArray) {
+export function show(cursorOrArray) {
if (!Array.isArray(cursorOrArray)) {
try {
cursorOrArray = cursorOrArray.toArray();
@@ -52,6 +38,6 @@ function show(cursorOrArray) {
// This function should be called from the suite definition, so that individual tests don't need
// to remember to call it. This function should not be called from any libs/*.js file, because
// it's surprising if load() has side effects (besides defining JS functions / values).
-function beginGoldenTest() {
+export function beginGoldenTest() {
_openGoldenData(jsTestName());
}
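
With golden_test.js converted to a module (and the print override dropped), tests pull these helpers in through imports instead of load() side effects. A sketch of the intended call pattern, assuming a module-style test and an existing collection named coll (illustrative only):

import {show, normalize, beginGoldenTest} from "jstests/libs/golden_test.js";

// Usually invoked from the suite definition rather than from each individual test.
beginGoldenTest();

// Prints a normalized (field-order and result-order independent) view of the result set.
show(db.coll.find({}, {_id: 0}));

// normalize() can also be applied directly to an array of documents.
print(normalize([{b: 2, a: 1}, {a: 0}]));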
diff --git a/jstests/libs/intermediate-ca-chain.pem b/jstests/libs/intermediate-ca-chain.pem
index 4d94be279ee59..c5a549e7e80e4 100644
--- a/jstests/libs/intermediate-ca-chain.pem
+++ b/jstests/libs/intermediate-ca-chain.pem
@@ -5,45 +5,45 @@
# Certificate from ca.pem
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf
-vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T
-pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu
-Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq
-BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4
-vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr
-qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71
-2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc
-iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW
-cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO
-Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX
-JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb
+k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh
+iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n
+NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX
+qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX
+5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD
+TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9
+Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi
+0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv
+xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL
+7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+
+gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA=
-----END CERTIFICATE-----
# Certificate from intermediate-ca.pem
-----BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP
SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
-uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+
-oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG
-3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/
-RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM
-DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO
-O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
-DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi
-Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr
-eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0
-20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0
-zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM
-+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9
+12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie
+B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0
+Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9
+JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y
+j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv
+OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ
+e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV
+7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF
++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O
+D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+
+swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz
-----END CERTIFICATE-----
diff --git a/jstests/libs/intermediate-ca-chain.pem.digest.sha1 b/jstests/libs/intermediate-ca-chain.pem.digest.sha1
index dbe9e3898afc7..e1ec750dc4655 100644
--- a/jstests/libs/intermediate-ca-chain.pem.digest.sha1
+++ b/jstests/libs/intermediate-ca-chain.pem.digest.sha1
@@ -1 +1 @@
-F42B9419C2EF9D431D7C0E5061A82902D385203A
\ No newline at end of file
+D33E7C8B0748C66DBEEE6E24410FA72A47607DF3
\ No newline at end of file
diff --git a/jstests/libs/intermediate-ca-chain.pem.digest.sha256 b/jstests/libs/intermediate-ca-chain.pem.digest.sha256
index 2cffe1b5da960..4ac5afdd90414 100644
--- a/jstests/libs/intermediate-ca-chain.pem.digest.sha256
+++ b/jstests/libs/intermediate-ca-chain.pem.digest.sha256
@@ -1 +1 @@
-21A1C6A87B31AF590F5074EE716F193522B8F540081A5D571B25AE5DF72863E3
\ No newline at end of file
+6568E01751761F5EC6A07B050857C77DD2D2604CD05A70A62F7DDA14829C1077
\ No newline at end of file
diff --git a/jstests/libs/intermediate-ca.pem b/jstests/libs/intermediate-ca.pem
index 97dcda1bda1ed..5edc2e61d476a 100644
--- a/jstests/libs/intermediate-ca.pem
+++ b/jstests/libs/intermediate-ca.pem
@@ -3,51 +3,51 @@
#
# CA issued by the primary root CA, which then issues its own server cert.
-----BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP
SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
-uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+
-oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG
-3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/
-RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM
-DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO
-O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
-DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi
-Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr
-eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0
-20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0
-zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM
-+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9
+12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie
+B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0
+Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9
+JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y
+j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv
+OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ
+e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV
+7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF
++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O
+D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+
+swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4sS5CFDshAiYA
-gpQwA0ppH7j9Yt/QXTenlEOW/9MI10RdMvCzTfHtuml2/hjyvr6gKHL7yu3IDZyR
-uVvTPQ54yWwljlrTh5SGtlh4bg5efVC69Sp9eX76gGUedZQPfEbe/2g8ozEZ/2ev
-oklcEJ4bejAtH0tuvdBLo+/P1bA1kalRUOb9BBwq9sYeYH18Tv9EzoiDG1qBmW6D
-lZQJNPTy2wt9vz1RByEICM715m8NwQ2h9dyewlYiYl1NgOWq4wwMHnhH1yqgu0NX
-02I9ViyMJDUGBdgzF4bQr9PWtZBji4HXfZjegXLdHFoNPUGp0A47oE6jUBk3Ihgg
-9u3T5GndAgMBAAECggEBALPMwaTQvzOGPOq4NH19Zp5qpJQBArR9W2YIH7jLum3b
-65Dzu4JDOmfd1zhhbHY9HaUbW70mtE/SeH8hPXSq9wC9zkvNhzxwvDZdJEmxksmI
-I+SuMjxO4EAMaOS7QHXG3dPQP3DwyiduDkncqqA3CUTksivrUGsx3KsWoKrrdAzg
-5tMfgcw+nRuOonJbIiUvTZT9CkpfTmOaNaAAS+60EDjBWGEHS1X6OUIcInQqlQ+o
-zbtSsIT5Xna7f/XYHzwtEc0oQdlc1+nnfMsBKZ2xFsTZqzWldnMPIBekhP3293hc
-EW9fLIUDKYtwyQ9zVk3AxFGtjRz3LknDGQywh9NkdJ0CgYEA3KceK5ZeRTlLsmhz
-fGZXfoSWIdYluhYi2+0rQ5I175BJoKxY5UQHt56bRtORkpVucbslj5w9uUffAzjV
-L38W/CqFBE1AMTd6fwxdU0MkWWMBTTNVJCoMCV2q95SN8gvJk18JK6e4VuteVE9U
-eF6uLWFGYzki/gM4Y31e9Mkzmk8CgYEA1kdUVTRGBjfkQ0ZK2JUk+zTaPDOwIaqA
-M56Srhqe93drKWnBO6xnMpFmCMHARCnY45Jr7ONn3ZYg7E1RjQrAzqAcj4i4jbaZ
-knY7/dEJLC+M8GNs88R1Et/1kwZXR+7NESUDTaWo29CCdpZ4Fr/9zUc+Cq+YSViz
-pnnRQsmRKhMCgYEAn7PtaErbTGsd2LE09RL1vMKmDzN59ufSry9How6OLyhVwg88
-ACRvGX2YkXjL4jZ5y6NSmlDRc+sLBL/7vPbVYgo8YFKxZW3sIUyHt53fDztTU9cV
-hqlQMO80sSE6Y7gYW6vrbUdlarPMgGUylDSjCIFheqt+Ii+efpPdSHrf+I0CgYB9
-Crrnzc1fK5aEu6xJnx8piLohj3gk7T5K4SpvdxF1D7b1P8WHcR/3nmmQiqAMJu9u
-Pa/FWR7vbABiZOxGBhRlf4GrMPmhmQEdOunfP/C+XDE+xoZ56hb3oMHJvlWIoI4C
-hzraQOW9AHxhf4UsoXUWKZDG45lWk/CiIUxAUSfpCQKBgQCe4idGKMxYLTH760Ay
-XTy7PkAGmOkz2uzkZ2mXFu7s4jVWkkIcemsyiTlESPMisjQ4cfi3rAucXzEoLxxc
-ecuCCEd3gixzl/j/4U5iGl3twU6eBpWK1KF7RG/EQ9hhuRLZXUoZbACd1uG5k+oV
-QcMXqarq2H87hd9vrxlB99Fx/A==
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDXZO/t1x8ObPb8
+DZuq4LgYg+owcd/WXVxEryY7J3AlEWbNtzgwQWAKa+FREsATqJ4HiCF8IqlmoPaQ
+78a0MEruKwzG6vXg1soooVefA+t5IGSdJNxwn7yIi/Er7CeL7DQ7H8C+vhvKhxXQ
+7pYnokS+9zI1sGT4haxN81stnfb8Dpa2toTmra8hmk3Zs0S0l70lW7nrAj3nNwBm
+afPdLVG0tW9PxeqYecJROk37mglZEFOv2lJxA2OJykqj4ZJPTdiPkpojwHhx015y
+V2CpuZ5twvSbdEZ8EqC/wASqyN2qDeG8PStdSl6IYqhcyPLOaW84KxC2A/prOd4h
+lG3oxAyxAgMBAAECggEBAJhQgHooDYYd9+n1lYcYshZj8k+ftzHXg0/uq5JZKSyN
+Sa1fSxSUpl24O/Ug1UMRke6xjTxDJpe0a6fCZzk0jUgumTJxJL6CJSLmNRf1paZa
+Ccw2LMxXqPLGQx1UOgLwXV8R7TL1LKHjNMIydWxBi2ufUpo0yrITlOzqkieH9Qf3
+3Ovc54S1/4VBWCkul+CwYvIpcm21A0W+y6iwNaRPTAzqU0D7bNUc4SE50oaJLhhy
+t4qBbZ6N037L0u4Gh37SzSM3+AS13n3L5u2z1cIGllHul1Is73NUzfH/dZruREaY
+UJqgGVy1N7TJKN0s0Ug3h9HGSrxv4/Q1YPdDqMWdO6ECgYEA7S8njwKY2ga9BPbU
+xvL2JcIf2fpmm3IbYajwT69nVRbF6jxo/jNa4wtLadD37/+T3zuCUn9+GUoeeBob
+C6zx3f3m7MJ2lftXJyQVm5ENtshWLANGk+BA5I35V1YZwqk+ep3q7t8HEfiKsj3w
+w+4YrdZqti7KQ12WMXRJr+nyhssCgYEA6HtDX/DpGpmOVEYKi0hpwflTzRMrKZRA
+VdPQh9PFztsPpVx2bxDLUPGORLjz6mjQ54s6vpWsFCIRGWknfshlMWCLwNewTWKs
+n6HlTXuA3FSSbXfDRrEvMU7S9uVjlkUawNrLCH9rcc3W006VdnuCvErqONbjRlfI
+8YBS6tetDvMCgYBQXKK9nLanYJMYpH0Rb26g+nYKSAIN9wp5+2B4z0hmlxG1vIQi
+ZMoNlV0W3Du1cFXs09/jDdluviM2tDmnqhBFE+rzGyxMwt0ToPFFRU9tN9GilfWK
+0veJuOTbh4uw3eEScIESMDTYDOsJW11BNWHdPIHpek8DpijmVq1E0jJfIwKBgQC1
+jz5o+Q4zVeUP77tgV2cws/U8XVICIOO1o7xht4PuLqqja/iaeLUwO9Xt2bu0P7OW
+gOeZ2+4NsyVDgRn1K6/LxiierFPlu8Aw0xDvWBqk9+97SmLZHJtMmNCtPSxvOPzR
+zI1vz4Mr16OEVwTnCUJqpt8RENFiKECoSp13BoUC/QKBgGBKIi2f2EYY2i+F9aFR
+wnyzKiTtHTiTVuLI0gKzh5c31wUDNJaJcODVerdAJPz0DZvRBWPvquUi2haJi1ZL
+ayZWCrcwXZWXq+DJGg29VP+JK0tI8xuYR9riliWZe5hLnmLj9M2RU3m2Em2/vJVQ
+SugaPOT2njhLHBmDgbUq6lOJ
-----END PRIVATE KEY-----
diff --git a/jstests/libs/intermediate-ca.pem.digest.sha1 b/jstests/libs/intermediate-ca.pem.digest.sha1
index b18b337262920..5acc6af1798f7 100644
--- a/jstests/libs/intermediate-ca.pem.digest.sha1
+++ b/jstests/libs/intermediate-ca.pem.digest.sha1
@@ -1 +1 @@
-70F69F1CF9ECA2515CF1F7A6A53C5CE749D4E59C
\ No newline at end of file
+0EB0876F8189F8EE0FFFC551AF474F0E66D8B52E
\ No newline at end of file
diff --git a/jstests/libs/intermediate-ca.pem.digest.sha256 b/jstests/libs/intermediate-ca.pem.digest.sha256
index 2abba6584e324..73e090b8578e4 100644
--- a/jstests/libs/intermediate-ca.pem.digest.sha256
+++ b/jstests/libs/intermediate-ca.pem.digest.sha256
@@ -1 +1 @@
-D56DD3A2C10AF3589B668FEB3F9A014C876BD912D701BF2B7332BA42ABC8BC43
\ No newline at end of file
+EBB24526AF892C7F7FB25145356299CD18A2E4B0ACBB447E400842F4AB6633F5
\ No newline at end of file
diff --git a/jstests/libs/kill_sessions.js b/jstests/libs/kill_sessions.js
index 57e6c636ab280..957a412db2ab2 100644
--- a/jstests/libs/kill_sessions.js
+++ b/jstests/libs/kill_sessions.js
@@ -789,4 +789,4 @@ var _kill_sessions_api_module = (function() {
})();
// Globals
-KillSessionsTestHelper = _kill_sessions_api_module.KillSessionsTestHelper;
+var KillSessionsTestHelper = _kill_sessions_api_module.KillSessionsTestHelper;
diff --git a/jstests/libs/load_ce_test_data.js b/jstests/libs/load_ce_test_data.js
index 7d0a7bdc7e0a4..22f979fcbd982 100644
--- a/jstests/libs/load_ce_test_data.js
+++ b/jstests/libs/load_ce_test_data.js
@@ -1,5 +1,3 @@
-load("jstests/libs/ce_stats_utils.js");
-
/**
* Analyze all fields and create statistics.
* Create single-field indexes on the fields with indexed flag.
@@ -49,19 +47,19 @@ function importDataset(dbName, dataDir, dbMetadata) {
*/
function loadJSONDataset(db, dataSet, dataDir, dbMetadata) {
assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
for (const collMetadata of dbMetadata) {
- coll = db[collMetadata.collectionName];
+ let coll = db[collMetadata.collectionName];
coll.drop();
}
for (const chunkName of dataSet) {
- chunkFilePath = `${dataDir}${chunkName}`;
+ let chunkFilePath = `${dataDir}${chunkName}`;
print(`Loading chunk file: ${chunkFilePath}\n`);
load(chunkFilePath);
// At this point there is a variable named as the value of chunkName.
- coll = eval(`db[${chunkName}.collName]`);
+ let coll = eval(`db[${chunkName}.collName]`);
eval(`assert.commandWorked(coll.insertMany(${chunkName}.collData, {ordered: false}));`);
// Free the chunk memory after insertion into the DB
eval(`${chunkName} = null`);
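
For reference, loadJSONDataset treats each dataSet entry both as a file name appended to dataDir and as the name of the global object that the loaded chunk defines (carrying collName and collData), and it flips internalQueryFrameworkControl before loading. A hypothetical invocation, with all paths and names purely illustrative:

load("jstests/libs/load_ce_test_data.js");

// Hypothetical chunk file ${dataDir}ce_data_chunk0 defining:
//     ce_data_chunk0 = {collName: "ce_data", collData: [{a: 1}, {a: 2}]};
const dataDir = "jstests/query_golden/libs/data/";  // illustrative path
const dataSet = ["ce_data_chunk0"];                 // doubles as file name and global name
const dbMetadata = [{collectionName: "ce_data"}];   // collections dropped before loading

loadJSONDataset(db, dataSet, dataDir, dbMetadata);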
diff --git a/jstests/libs/localhostnameCN.pem b/jstests/libs/localhostnameCN.pem
index 7b8530d39ac6e..918ae0688649f 100644
--- a/jstests/libs/localhostnameCN.pem
+++ b/jstests/libs/localhostnameCN.pem
@@ -3,53 +3,53 @@
#
# Server certificate with IP localhost in CN, includes a SAN.
-----BEGIN CERTIFICATE-----
-MIIDyzCCArOgAwIBAgIER67qozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDyzCCArOgAwIBAgIEI1TzoDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBvMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjBvMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDESMBAGA1UEAwwJ
-MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuBhhUIRS
-AzneprgtUZER24xU5jSbtGRXwRc+2MC3G7WgIKl3OvIFLn3BcYuUnWIIXD0Wa1Ry
-vToIeqHj6Sb8IMlm9lNMtKOmjcFfLvaia8q+SnsXc/FZMQT3qvNorhoI574+mUBL
-msFbNeYthLijvpvd2aL0uYqebtcqc73skL0USDbl0djGijo5StzCvQCK9J44Htzl
-A3oitRYkWiBZZW7SFinPLKsUueFdhGFZK8SUq1ZNjowUbx8jzMGzZu9JquGdhIEq
-3V8p/5WixFI3s5u21WFaAKljjh5HKj2HSI+MC/Hiuqy9sgoPZOjbGuDLaAgrC97Z
-WjW5qbvpX3RUMQIDAQABo2owaDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
-HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUSrTOui2RoGQl8kBB/Aj6r+waT3cw
-GgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCX
-pXs6Lnste/dxYim7N+xet4UmMLrSCDLOHAECef57hbH9uVeH+Pjl/TMGbx098Yl2
-ibcVT4Z7ieQWGmEyGWJw44RsMcpQpiMGXnaWZe2k+Hn9xfSQrVoyOFhxgUTnFu7u
-nzxqh2Mod/MayFajtLFdYPjHuykxeL5+kBEPJuwTPBN2uaKKyJrmKwnq2mxUMAFj
-+i2B9bZ67hSCaLAY4D76QCvEiAeTwEzhijovW077jOrqRdrHViRa9MI0CZkTy+cI
-h23LR3WPjlHlihag/EIdOjXlLYoONmEMKiDvTsI0f6hynXUNyYa6gT82NQwbxz1u
-JZn5x2nCr6m0PdUCa40f
+MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv/TeqpZE
+PNM4+QbmQEIpk25GVitmdIwpQ2TDJCkdumcQRB7aFEovGi3bGgG3B9wkg88ASFh0
+QdytV/TNFbX659hVVKdh860jQDe4joYZXkUIXeRhGiq4wXbKUJw493pyBTWitNTp
+uYbVOB/7jvfvunTy9p8Z58QPXwP/IUsUqkOwrv1WE+rjBvia/zxDl4I5UoGSI208
+4h7GiHmSQuguaUWZkmlsVgmV8laYS1jp4qOr+hGoFYegW63tdZABD1GBOrxLP5qm
+yrlc7+FHsNi1AmIdYHg7QV3BpxxMVb7J4cSl1wolS6KHuckm2o5K4FJLx1Vhjl8w
+UKJPo6B2v0wdQQIDAQABo2owaDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
+HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUpe2ZqO42LqlVTOF/xOsmRS1vbKYw
+GgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBn
+ww6JKaLBChLsM/qhp2rGIT3QPKry2p54yRWwwqE7dLN7GnhCIjfMbZX7wf4DqeWc
+xbcuHQI6AQJWde8xBW44p+3uI1xk0NoH+2xsjtGxqMyMVcWeG8qnowqFvJos5qLc
+xf37P74FKCVCAaU3F11ikCNZ+7rG0EdDfINUSRDayAe2+qBuv5c3LvE5L6rifZK7
+V2S/eY1d7TFANsjxQDHVaiyqJQa3eMGW9GhiYD6YQ7LjNTE2+ofjn/csjhhks5il
+dB88SdZFitCPBG8ZBehL1kdJOrRyQFqwjTPxSoprShk9YwK8fQn1NZyl9248R7iu
+64ugfNDTOT2tJOdQwLnY
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4GGFQhFIDOd6m
-uC1RkRHbjFTmNJu0ZFfBFz7YwLcbtaAgqXc68gUufcFxi5SdYghcPRZrVHK9Ogh6
-oePpJvwgyWb2U0y0o6aNwV8u9qJryr5Kexdz8VkxBPeq82iuGgjnvj6ZQEuawVs1
-5i2EuKO+m93ZovS5ip5u1ypzveyQvRRINuXR2MaKOjlK3MK9AIr0njge3OUDeiK1
-FiRaIFllbtIWKc8sqxS54V2EYVkrxJSrVk2OjBRvHyPMwbNm70mq4Z2EgSrdXyn/
-laLEUjezm7bVYVoAqWOOHkcqPYdIj4wL8eK6rL2yCg9k6Nsa4MtoCCsL3tlaNbmp
-u+lfdFQxAgMBAAECggEAMn4FWucf82CQ79s+GswWQMhJlOZC+oQ3CW/NvfeFgWUc
-aTjxycoNn8XAI9trxIrZziq3FnMQEVR0dOYoM4+MwZVb1x3bwQPNr72k1KWywKvX
-62e9dABzPR2mrE2cnXvoi19DnhbjZau4z5y1SVy3FJV9kK7APo+FitMbAqnJ05yA
-2zH0O6tk4vyoUo5Bj/8GFam194uCpAhm0NfBy9Cyk1ZenDMRmyUefahmeBGFqeaF
-f6E/Bazb6l4drhu4yhuaEA4e/VXkopixbzQHMctGoU5EUQLUlJkbtAkYx61c7x61
-addr4OgPWv9Um3eIu/0FOziqVMTuobSrf63vStOSbQKBgQDwTU6/fAkcEHNpOb9y
-oWnS2L6JhNMhzZyhyj+nUz2qp6trurnXwewE9ZtZuSniaQxT4I5Qpl7CaGBmhGPs
-E8qEX/lKK9DY58vMUx6JDBtwdAQLBC42LQmneLj71gfwWZ8VVy7AZ96sNL7GC+YL
-2WvhDG4f08ZfyRtMSYgSgeliUwKBgQDEHxk1FL6q04lExutvjO64J/QyH3qZaqjl
-UenOQVA5/B2UuJWq9n4m14e9ZzWdw3GFiT4+hV4iMs8z1HWMyuYpEA6UNla1u1Xe
-VmFGq306OuTh54PP/fvu4fwiSWXKQTEkrbb9WLdc4sIy2STuOX+41b4cZy0I3FqK
-tZdkLY5m6wKBgQCQwoGF6Pqz5VUhNqCWNZbCZb+iqFloK60H6gaejg7AF3G03C/I
-QhIkirCjRGBu/Elo3gXdn9vF0YsBNw/az7FYPVi3zd1qTXkABbKHbLu66qjk2gfc
-qxT9xkPpse3mZJbpDDQlxGzn4H5sYA6dZMUQNaTBl5oRadz//+vw+kHV8QKBgQCY
-R+ljvOrba4svWyFeKVkGpwdGkAi67QWdof/gRfiMPYWef8C+0cxcTog4edY43JPd
-8xXgp9/SwA8BGJv5qWYTRkN7s8GaNI7VJ886d1eyCh7EheZkbrra3p/O45zk8b+9
-0iC/EM63kd7mapLxYrYYh+ao2TgvpCGtiJi3kWP75wKBgQDcJ0+POm5QQYlHDecF
-MZOpfI1FDfummX5yY/+8RuG/LVyB5nfilbz/EdMlNc2rVenI2IOl2MHqEuRNv2sh
-5/w0/Dgmk02TWz9HO5G0TstE1t5K3Evd8Ti6IF/N8SoJlJAqI+aqFXW5r1U9+KH4
-h9BY45YpwEgi05lHTdQzk413bg==
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC/9N6qlkQ80zj5
+BuZAQimTbkZWK2Z0jClDZMMkKR26ZxBEHtoUSi8aLdsaAbcH3CSDzwBIWHRB3K1X
+9M0Vtfrn2FVUp2HzrSNAN7iOhhleRQhd5GEaKrjBdspQnDj3enIFNaK01Om5htU4
+H/uO9++6dPL2nxnnxA9fA/8hSxSqQ7Cu/VYT6uMG+Jr/PEOXgjlSgZIjbTziHsaI
+eZJC6C5pRZmSaWxWCZXyVphLWOnio6v6EagVh6Bbre11kAEPUYE6vEs/mqbKuVzv
+4Uew2LUCYh1geDtBXcGnHExVvsnhxKXXCiVLooe5ySbajkrgUkvHVWGOXzBQok+j
+oHa/TB1BAgMBAAECggEBAISoyinCZ65Rm5IevomyL+F/2IQ8wjXRl8LHpEUdVTo5
+d/hcCgLzsdG51y4F+WQpM+MT7/IPj2jlBbc/q2xRs/D+hRpYA03QXghHefilnGqV
+8s5Qwvq+g6jQW8mR7Zy6ton7I1MtwclB8aE0GToZB0gpArCGC/UsfiMANBXiOYng
+w5UMJNzTQzPmbLtxcMBysDTJFrzbaeNAvEm3zOe5+Y3+JzK7PuasoGajF3Trm4yD
+Fhb9PtiBH/V63W5SidfpwENo3P1/fvab9LBRfA9z5g+ZiypnE2CDvy29fxiq/pY3
+fXn0kxl4ym+7wIvxnYx23AjMMERnJHmOn9skGm3RKXECgYEA31knsxHVaZJD7Djd
+UzABp9xUK5rETgQ1Dc3wS5kv37ZmodQLOS1QJxF4iQneZ053m4AjjOEpqp9Kvf6H
+8RFWi5f3/3wB0icBVCoY1NnI8eaOaBzckffClge/jju+DLAwUNL3Smh0ZIl7Egoq
+CLel4LpRJt7LKFbseLs3ttn4XxMCgYEA3ATfr9gwYPmow6en38awSrLrhWJH0ANn
+/p8Q5EWdQo7ViHsAnNJt1Zg0mCzGLeKnuFXT2NZzNaizqA3Jl+FKvsHwIdtEBbBX
+OiYVnEbc0NuTKrHb1nZan7XmRHnxahlr4vf9KsxSw6nQsOhBHagMqUW334YMk5qO
+BG6dk92TGNsCgYEAiYE+K8Ti+ugN4TTxLfH0UwAW4fGawd1dPG55blGVY8nMTf1a
+G42GN0dOSjBFOJzajmXJfUZyfJUtUuONliDyg2bATA5woI1bCBISz5h1WlhzfC2o
+rkU3C18h09N8Ihum41u+25SLdAogNu8DCfLmsQCETcYElYVOeNXqayyu+PkCgYBE
+h6wYvHDNM9YyIv1yARQWIEalxCf0DauNroP3ZguGmLDAEvfs4MpfNkpjf8a2shtl
+mi4jIyC9fO+Aj9LT4NOEOSoPkZlNYFC7BvPCOnw6/bmIGeAMm868Yk730zezwXlO
+N8n+U6gP08vx3lWx5A8VhmawU7OFIiXMEZw5W1Ge8wKBgQC7IqlXmq/cZZyYItUM
+JUNPgbNU8RF1V0DDyxcqZ6brNronoJe/FJQyE6y1dV3ndk0y3jS+hZHhuvrfkMhW
+tspeTRZ7GqvmsDknnE/Oow8MuOaO8mFAebMee9/Wm7fp8UQzkqCD1ro1T0852Aax
+qqG6MHvmqu7cHdsgTM7lzGBHaw==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/localhostnameCN.pem.digest.sha1 b/jstests/libs/localhostnameCN.pem.digest.sha1
index 9df0f445e09f2..9295181016754 100644
--- a/jstests/libs/localhostnameCN.pem.digest.sha1
+++ b/jstests/libs/localhostnameCN.pem.digest.sha1
@@ -1 +1 @@
-C848512676CE8B7F1DC1E4EAF0733329F9129356
\ No newline at end of file
+D9F106405A41A436C0B53C4670BED9A7008C1F94
\ No newline at end of file
diff --git a/jstests/libs/localhostnameCN.pem.digest.sha256 b/jstests/libs/localhostnameCN.pem.digest.sha256
index 7fbb9c42d0b78..cf87b29408366 100644
--- a/jstests/libs/localhostnameCN.pem.digest.sha256
+++ b/jstests/libs/localhostnameCN.pem.digest.sha256
@@ -1 +1 @@
-D57B1241325C2C3DD08B7B7B2D095026C48DCF8390841E00C000EEA7F5DF3F87
\ No newline at end of file
+64439F597E9E8F00EB8C01F0CB413EA1B12B95A18FE41195384B61AA08F290FC
\ No newline at end of file
diff --git a/jstests/libs/localhostnameSAN.pem b/jstests/libs/localhostnameSAN.pem
index 0194976edc841..32cb68ba698ca 100644
--- a/jstests/libs/localhostnameSAN.pem
+++ b/jstests/libs/localhostnameSAN.pem
@@ -3,54 +3,54 @@
#
# Server certificate with a selection of SANs
-----BEGIN CERTIFICATE-----
-MIID8DCCAtigAwIBAgIEA8/VOjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID8DCCAtigAwIBAgIEHBHc+TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB5MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjB5MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEcMBoGA1UEAwwT
c2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBANdWi2k6E12hbrZ7x9ypiFvPXvYBeQpQn/LndWDhbhr7gdzd1CYDov02sB2x
-v+ZHAbw4M+mlCSubpi71/TriZ+F+QOsNqfRNPB1x+KqFQOQqlA/5bq4Jflz9GMkg
-wvtb7tMVAbk3Hv4nHDJJ1TBEFkpR7zQl9hAFQxm1fokb5nEkDQk54BMcDf0bHT99
-nP2uSB8/5gAIGMRS4d0YSVyCEezkNjY5w1DEIpyMx3oQ4DfcE3eyh6RcXvz/5lEr
-YTcnRrN4f3mZySPR3rRolEWwioBppz8bnDr6a64C1wOGOsybNY0J+7BQV3totXwE
-P7UE1ikD42QD3bwLvWLqNGQ+wysCAwEAAaOBhDCBgTAJBgNVHRMEAjAAMAsGA1Ud
-DwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUvdKLieNLHvQI
-2LepEk9sdqm08sAwMwYDVR0RBCwwKoINKi5leGFtcGxlLmNvbYIJbG9jYWxob3N0
-gghtb3JlZnVuIYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEARKd2X8OKkSJ06z0G
-kZ9KjHSUL5MSvdZ65+gPxpH+ZPVa08jnopy81e8FtKNXPA6oE8YoXUdHbsp9C81H
-qb496yK5mAIKsYFmxyp6hC63/si8hnfVCTuKCGu7clX6kKVUReQmj00KgpsIPCf9
-CGC5X42ahiqLvuzrs6lKEKN57xBeEtY0qguhunwOakgstk9i60ELQFoShxM4tS6m
-5VIu5bp1Ryji+WXvIKFZOkzi3TwFiTllw0EdGdcVL8XymMeINzATocJs75ht6eq6
-Vh1R+O2Oq6BS+gnEJS4dugvwUSo5VinKn1QzBT559Ebj4BOSMH5G+l0MZzm02yTi
-YyV/oA==
+ggEBAJyiDUfLhicCkMLaulsWfI8QQJEkJFbJcJ+dxL/ckl0XpE1Bcox9KeNy1dJ/
+JEOQqWyLnKqb6MOpS8UDp1VhDY8JeQShecy6ddPlfsunM0Er/YLiPoeudc47g0Oq
+BywBtZQ1XMwhnD0v6GbaL9vQBCenfA2laVtKMHfJRTrGCDxgiPfad6nCki92Chio
+7W3IxGjQXdbqMKvZjI465dbyNuZsx+B4dSzYCUs4aPbvWpzqaaTLakVATatUGcHG
+TQEzPgyytSk46pod3nILad2isNU5ZOnsNCaHjKNHoZRn+1Z7b5FIu+/pl75Moi3h
+T0FjwLQRZF++3cnBzX0vpH9jZB0CAwEAAaOBhDCBgTAJBgNVHRMEAjAAMAsGA1Ud
+DwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQU1Fd1BIdSItER
+RKQ8Jv0M+Lww/KkwMwYDVR0RBCwwKoINKi5leGFtcGxlLmNvbYIJbG9jYWxob3N0
+gghtb3JlZnVuIYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAJ6phZ78MyNxZuwD3
+7DY1s+X9b1fxQ8JTZF8AAsLmyVTXZD6DUVXPw9Cz4SqvFb4XgpWTIQKj9ZjiuVXt
+lPCjQH2y76mdAdKhe0FOkndfBRcg5ezGtWvmiDMqoM9pAdzZeTcIjhfFeYxtEQB/
+xfXzFsVSFvYXgTdTsK2ii6PYpuboSkqKiZd5jhlW3PVGNMA4G9hysuHLK6Y2MFUc
+ixE/rhz7pH9u3H/XBwtwxMwDj/NiMpSOlEfomBleX5mZdWCCklQM9ix8voq8gUWI
+bE8nywECfSn/nt9dwykQIRP+ZE4+Jz9YkIduEkL7x78pIR27tc97SXwpXQEHhvj0
+obSYjg==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDXVotpOhNdoW62
-e8fcqYhbz172AXkKUJ/y53Vg4W4a+4Hc3dQmA6L9NrAdsb/mRwG8ODPppQkrm6Yu
-9f064mfhfkDrDan0TTwdcfiqhUDkKpQP+W6uCX5c/RjJIML7W+7TFQG5Nx7+Jxwy
-SdUwRBZKUe80JfYQBUMZtX6JG+ZxJA0JOeATHA39Gx0/fZz9rkgfP+YACBjEUuHd
-GElcghHs5DY2OcNQxCKcjMd6EOA33BN3soekXF78/+ZRK2E3J0azeH95mckj0d60
-aJRFsIqAaac/G5w6+muuAtcDhjrMmzWNCfuwUFd7aLV8BD+1BNYpA+NkA928C71i
-6jRkPsMrAgMBAAECggEACr6QujFhofxaD9ThRgtXeG1CabftGCyprm8KFklpOwNt
-mV2gJj1sNoIyC2lBD/ZfCcssG+5WJMzhX9p6RiPh1wh7qL7Hyh7IbxeutqNrd3cA
-aEOj2B81JOKkB/UQhYYyxml36ovrifbdm0y/zGa5TgAElW+HdYktrey2YZT4zJBO
-fpkkfaj1EjcWvTnBfsfsyX0M81Nvnvc8ViejhwbUf1WGMK0VCgxPAMVuqYQAr3OO
-HWN2fOvU9tyeKC305viIqE2x5zOYbhY5U/mPht5CXVhI4xHZEeXlGeK8ic7wiWqt
-yY834uGhzEEvr4FQcUVgujhgkQfGrd3+qXjiZ+/+sQKBgQDwAeXjy+0Z7gUtEwQk
-fX5FDv5CqwVgWWwMVAg7ogchZuFstuO2qlbeg0BrHXUdiZAnCLvay2vCYApKzcs8
-hZBoV10mZUGjZVkC06LWa6EvtSTtYPIe5NWoHcMQiYHY0zj2h5Hyh5JEii/a8PsN
-7v1JmwrTg4xoAfm5FZH37wMEdwKBgQDlr9TC6KfFnvcsvl0S/4dCDXNk/Fvk2nj5
-ICjRPQkllutWjkSJCecn1K80NFEO+b8yWTuT7ARZY19fU3Rp08RN7qIkjoMT61ug
-DQN4nLMJF8hjLQSu/ZpuFzq7UTEi2LiUzkaZ+6/FwnxXl6PlAYrCSo8n4RDtEJqC
-OueDAkan7QKBgEhQKhXigYPIhWK5Ugw0i3D/PXiU0r/e8YEdaIWfIgrcVptM13tY
-A2hzn1smuvy1T+uS5BbeJ4+fJDq5mryXeZUWjYheBdLXXGRPo8Z7E0uuhnvHM+w/
-Amju3rEC3U6ZBZax7jVmx5lDEXwkE2B1W3dr8W0dO5ay0a0EZMCRERWtAoGBAInW
-5ZY6zUR5y9hcPbsDAocPT2sRT4yd1++Z7yTyviSC16TQKC0ddk2fA64On36fPrDW
-gDybbVi/nCK3EKvZ3HrwdPn/VIaFvizk4mplj8QrM6ThBWiG5UCgTCzR4u0Ipm0X
-BaOvOaMyvjBK3p62ODG1UQgbt26tgiwZcYpbuU9ZAoGAE66/9onziZ6mpyjlZdW1
-78bECsRPvC3dckAMcDME8Wr2QGgYfrMRzdoJn2jE1mAF5K9MsV4UJnpx1WwkJcSe
-jcZUZkRzJgpag6BiSuFwOVcx2oY68dor4dBCBf8NDQYycie0H55UuN9UNOTLjYUy
-dEIFFmIgZcexEsmgD4r0ync=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCcog1Hy4YnApDC
+2rpbFnyPEECRJCRWyXCfncS/3JJdF6RNQXKMfSnjctXSfyRDkKlsi5yqm+jDqUvF
+A6dVYQ2PCXkEoXnMunXT5X7LpzNBK/2C4j6HrnXOO4NDqgcsAbWUNVzMIZw9L+hm
+2i/b0AQnp3wNpWlbSjB3yUU6xgg8YIj32nepwpIvdgoYqO1tyMRo0F3W6jCr2YyO
+OuXW8jbmbMfgeHUs2AlLOGj271qc6mmky2pFQE2rVBnBxk0BMz4MsrUpOOqaHd5y
+C2ndorDVOWTp7DQmh4yjR6GUZ/tWe2+RSLvv6Ze+TKIt4U9BY8C0EWRfvt3Jwc19
+L6R/Y2QdAgMBAAECggEAGqEabOhXKG6xOc+B+1Z5WsFCmOpyGycoNJrYBl5zq8wH
+LPNBjE0DQB4X6C+hdLM1erTJr9/N1OYFcbtLSaK0cWsE3hqt4Haa7amIwwrsFdpq
+wUPBqcOpV6Sajnhru7mWq1nfKv1T2Ls6YbIajN5ytmMtHgzPOqwETXk0geeFIu5q
+gP/aMJINGmEmOvw1ZlE6+x3s20l8wrVjqRPfYtKsuKh1Iuu+jWjZzR3AeASzu4If
+kYJiaDwO5oeY+/bt/VFLClSwYMfcACNDMjptXSx92RrDRxnu4SPJ4SqQLms7Q+0C
+X/TnkGAgvJJuRsrvSJftJiSVW4nOrKHvR4PoaUIVKQKBgQDMICBuFtYWNz8egm9L
+VTs5BtM5Gn2gIgCu4cz7Ai9lmmAq/xyV5X2qImjeNIEcrNaxUXrGJTTR46PhN8Rt
+SAqUMzcb1A038iShMr80N5ncDAOwNI3lYlHjHd2BqnHB+7wqIyEfCwgVmDQtePdt
+X098GwitGx4DoiCss5JhSZGqmwKBgQDEcDAnP9YosbfNZKHvmvNvza4bMjEZ7mMg
+tQh9cuGdtUC6Nk3qMFg652KNhZvg1W2T8JCQB6IuSoDDarc/nrTyjf1OTgrqfPqt
+14vRHTzq5fY8ry7W5fgb1BMbKIQthKY4Z1MMe5C+pt90ofQU/iyVJob1TQLFgxs5
+qQ2GjKhbpwKBgFfFaKxtvViIIOfphhmKaJC40pI3RdVZSZnpFc7IvcCehMN4Nr5t
+k39YlGcXhHzkBNzyACy7St8FVy48YIXs+D+JViTtJVHtGLsHqxe04L+xmtRlhK4h
+fLx/1wMuWwPiTqJ/wHossCk47RawcRPia7cdmLl2c410ZUBdZo/WpoAJAoGAXvvE
+POZAHnM4Zcc4CgyJk1EH07ykQ16ibek7TrVi5IgE4UVqzUdNEkZZwAaPxdpNXtBe
+hlY6lFmQA22xZ8DMy8/eYKOZ4aJG8BIeWCHkF8zUEKnAY0bVfldAWcxwhTXzzagg
+XxPMfHNh7xp0VEZGtmPns+rl4S0w1+OnV5zht1cCgYAQHOXPX1q02OoUy6czML+R
+OxUYJ+a88JvZHeo7PEIgxgrmR7ilz778uZ6VYthijidj1JizkJsC7XwsTl1+gcpS
+2BUCeKoriLgk7VTlbQKESg7t0TuyGZqvrKgXYFH7pfmQgWKPnV8fRd7neJTZTTU5
+519KLeSDjmOITErrQIVGiw==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/localhostnameSAN.pem.digest.sha1 b/jstests/libs/localhostnameSAN.pem.digest.sha1
index 1852be474dab2..27fb851592b76 100644
--- a/jstests/libs/localhostnameSAN.pem.digest.sha1
+++ b/jstests/libs/localhostnameSAN.pem.digest.sha1
@@ -1 +1 @@
-378683F7C8F790D575291116028F47E9A6001AAC
\ No newline at end of file
+0CA15ACAD6F10DF5C671B6DC739B4A7B25F73782
\ No newline at end of file
diff --git a/jstests/libs/localhostnameSAN.pem.digest.sha256 b/jstests/libs/localhostnameSAN.pem.digest.sha256
index 63817335da55e..8a6a740676997 100644
--- a/jstests/libs/localhostnameSAN.pem.digest.sha256
+++ b/jstests/libs/localhostnameSAN.pem.digest.sha256
@@ -1 +1 @@
-39A96BCD7FBB5D653D2AAC46F1FDDDE0C8FDD2B62E43F220F2A0627E3EEA8C30
\ No newline at end of file
+E3F243A8F60DF7E8157AC3C06D3486824C9EB8C30F557921DEEF106A3A272734
\ No newline at end of file
diff --git a/jstests/libs/mongodbauthorizationgrant.cnf b/jstests/libs/mongodbauthorizationgrant.cnf
index 3c2a82b36cc20..08f6c4b7ff277 100644
--- a/jstests/libs/mongodbauthorizationgrant.cnf
+++ b/jstests/libs/mongodbauthorizationgrant.cnf
@@ -11,4 +11,4 @@ database = UTF8:admin
[MongoDBRole2]
role = UTF8:readAnyDatabase
-database = UTF8:admin
\ No newline at end of file
+database = UTF8:admin
diff --git a/jstests/libs/non-expiring-ca.pem b/jstests/libs/non-expiring-ca.pem
new file mode 100644
index 0000000000000..542364b44870e
--- /dev/null
+++ b/jstests/libs/non-expiring-ca.pem
@@ -0,0 +1,52 @@
+# Hand-generated file, do not regenerate. See jstests/ssl/x509/certs.yml for details.
+#
+# CA with an expiration date far into the future, used exclusively for testing client-custom-oids.pem. DO NOT regenerate this certificate or add any certificates to this certificate chain without consulting the Server Security team first.
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIETEJLQTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
+BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
+IFRlc3QgQ0EwIBcNMjMwNTA5MTgxMjMxWhgPNTE5MjAzMzEwMzU5MTBaMHQxCzAJ
+BgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsg
+Q2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMRcwFQYDVQQD
+DA5LZXJuZWwgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+ALcpu7tjtN5syZJdK3hytuqbWOYHzTPTgx1L3EjONJu8FEmcOY94KBuG85+CfkoD
+545dduCp2JHrEu29UCeV88baLa/XHQ6/yNptbhm3vS9Mo7wKPpdMvxHEUMV6AEVq
+nXSmRlFumekF2ggxQtJsQ/i/gBCZRb/Z+En1mbSFjT9e7XZ5rzA1aB+aXyBaYY+Z
+ajtdIJ+zOWKbo5wU/blDUK67vsAKOKrFFzo3RdSnGEpJ1WEe14nka40ieUrmiYVK
+gG+sFd+92wrdZJOGnCvTL5PNnzx+4dmSqnC/tUKfSgbHn2g6pfljzhF8Md9WMKKi
+aNbGQUPxDzACJ72WySQFYMUCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAQEAd6MKAqauGVl3PE3xL0SYPiFgCAj5MXrHjJWhqmhjoKrM
+8Pz+yCGEPTLRKIZIiwkC9Paq6ZOc+epsq13d+hVnGn5sOcJXK36w7wMV8liQeckH
++S1AYcUJSe+HOKDhWZ+RD4Qhx3gGZlTuKk7uy+ZJ1cxOPjH9RRgB9TYm211hoYd4
+MaQQ2Mm+6srYCQFPO05yfWy11kYSmuo6vYUEm/HU+CND3seVHs7+m8er3bKfarhr
+62rNmzKWdFZRyk9Vufo19OCC0ryLTohYao68J1I/7ghCuv7daIfVutc2gwvTj+9k
+PVukHno3wMF4J7ucic1eXm+Yq3t6FaW+YHjQfTQaxg==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Kbu7Y7TebMmS
+XSt4crbqm1jmB80z04MdS9xIzjSbvBRJnDmPeCgbhvOfgn5KA+eOXXbgqdiR6xLt
+vVAnlfPG2i2v1x0Ov8jabW4Zt70vTKO8Cj6XTL8RxFDFegBFap10pkZRbpnpBdoI
+MULSbEP4v4AQmUW/2fhJ9Zm0hY0/Xu12ea8wNWgfml8gWmGPmWo7XSCfszlim6Oc
+FP25Q1Cuu77ACjiqxRc6N0XUpxhKSdVhHteJ5GuNInlK5omFSoBvrBXfvdsK3WST
+hpwr0y+TzZ88fuHZkqpwv7VCn0oGx59oOqX5Y84RfDHfVjCiomjWxkFD8Q8wAie9
+lskkBWDFAgMBAAECggEAMUziYwXrYRMXDnZVhQ4bffpia0geS5za5b/Ngys9rc1q
+CIYsWBmXchFY8c5F/9YdNYyiusX8rQoBBky2jnEmNJf1RCc6gwXMIj7iK7nxChIu
+4CZBwqe9piKZOw4jlIul5gf7VV/XvYj9rsbTnZ/Wi9W/urgGtiUNV5rrzFNGOY/2
+6xMkT5Ng/vPJZgXR0nSxsKccFWglBJPFOcsTH4conAdDi9MEnenV2p40ryfECVKt
+Bvpe9GFGtxsZxuIac8Juxt7XH75XOo1MYASytSt0S1Gpp6NJ8VKk/n35Ea29Cbtc
+0Syw76/9uYy571w/SWQgcl21OusplMg9Dd88i2gGAQKBgQDfMUFCbaHguy8YEqGb
+2OW+gQCoKM81+cLV4dgP61JYSmMZv+OzqqJ1PhQMxKPU7Fvl9LTr42YvEEjAXBKh
+KP7iN+VdQRL520Y+cGcSucjPfRHru+4dGEJ+aPPuYG50HIbPDm0L1OaczZTY8hyB
+cs4nqo1j8MJhF3S/AxFnLmdQMQKBgQDSFiuL85MsGqBUfrK8xR6nlj4Zi5x17nde
+p8YWZs/DhTRPvRDNbuq77TfIXjMMyMKJsjLNmUEfdQuh6peEMotvpJionlsiTg8Y
+efNDaQLseKKNK5/0Z7vSJxmR8nZIKjflLtMHNniWBHpHc8+0chKS0BDQ5KZQScCV
+Gyt8gggo1QKBgB2QX62V6hBjmwxcQ23qYBxI6DZeGXxz1fwQy1boe+LYD6J0iYvd
++WEAVRWP4oesu0uNi32HhJyNUqWwTINuc7yxXL9qhEH2aqqQpwaS92eMkJgiL5tJ
+AF3QNyeHPHpC4RSCO62KiPWSQbou79mxxF76t3nahVTpD3zRwjdhrSuxAoGAM50Y
+w8Bqxuofu8KI3RG8r0WdKh9/qAWXNB0Z8IT/xDRknrZ/e0klyFfGXaau+hQUn0m1
+a4ecVUMnQXdmFCdq1Fnm235UO4Bb+xJy7nvNKRWWMgKmwJ//p+jNQmsEHkSpCNhR
+JjN+urSM6iMMw3NHFuShTQDvz0ffVYKgU22K3aUCgYAA07eKDRBaKUzyfDTSG7UU
+d1Gri/rw0PDyyxvl+Dn7mylH2rAjeDZxjcrYs0ZoXT2/5A6Mg+C4qNfFjv1PaLKa
+W5+meJUkQG9Jv8Wxbbcz2ewhfky4SfPwTpgCycKiSiU4If1nWBLbEuokhHF4UXeq
+I2mtmDuHNCXggaMWM9tnQA==
+-----END PRIVATE KEY-----
diff --git a/jstests/libs/non-expiring-ca.pem.digest.sha1 b/jstests/libs/non-expiring-ca.pem.digest.sha1
new file mode 100644
index 0000000000000..87ce842de1411
--- /dev/null
+++ b/jstests/libs/non-expiring-ca.pem.digest.sha1
@@ -0,0 +1 @@
+1459D973E8240972BD740F43356804E8E7E39BA7
\ No newline at end of file
diff --git a/jstests/libs/non-expiring-ca.pem.digest.sha256 b/jstests/libs/non-expiring-ca.pem.digest.sha256
new file mode 100644
index 0000000000000..7c1c23e4f9b37
--- /dev/null
+++ b/jstests/libs/non-expiring-ca.pem.digest.sha256
@@ -0,0 +1 @@
+EC5C4791F4FE1CC67493714F5441A7B14E831071078B6D2725E3F1CE5080A325
\ No newline at end of file
diff --git a/jstests/libs/not_yet_valid.pem b/jstests/libs/not_yet_valid.pem
index eb22af451b6e9..db2412b1cdd46 100644
--- a/jstests/libs/not_yet_valid.pem
+++ b/jstests/libs/not_yet_valid.pem
@@ -3,53 +3,53 @@
#
# A certificate which has yet to reach its validity date.
-----BEGIN CERTIFICATE-----
-MIID1jCCAr6gAwIBAgIEJLBcqzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID1jCCAr6gAwIBAgIES6i6UjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNNDIwMTI5MjE1OTQ0WhcNNDQwNTAyMjE1OTQ0WjBzMQswCQYD
+IFRlc3QgQ0EwHhcNNDMwNjExMTQyODM5WhcNNDUwOTEyMTQyODM5WjBzMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEWMBQGA1UEAwwN
-bm90X3lldF92YWxpZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL29
-rHDLieL9YgGh1QmOKxB0MWXKqDAZ3/ZBdVWyV3ALLmqRO3sJTNiVeTdLG5LN87bW
-8XfZrim37qpS+mh/NXJHq6MhTq7KBQuHm8/NjpRwKTQAI7gXvd7ItFUCPUn72fs6
-S3Yj1KMyG6oUuG/H8UTsH1XNZqCDbUMM27puAG5L4gg0ggu0PSXS3zR8R0Ljafkx
-N1OGqaQ2hY9p10e8ka/YP0kBR/B+VSSngk1xUx+z9P1+DOW9sTJ3egj08mH5hszc
-SXJmQqmbj6ybRxQifMCvHKvOaB/C26ifV1cfn9CS8JP6dhJrX5Y42d+ufZJ2KXHZ
-YJTkILQJlD7dHv184LMCAwEAAaNxMG8wEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYD
+bm90X3lldF92YWxpZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALtj
+wVivlSxj0vQEqUivXQso7ZGc+V6kvZRGzJWdv9C5IGhisk0OUSA7dOVpqaKlo8VB
+kXXkWpq+d+ZeU5PmlNOen/Txrd5z+rLbuFw7lHcPudO6k/2gXeeLdCPxnA3jZkXl
+AHARqS+T+eTg1sCGN2dZ2rMeGv1zpzDB1rx4kxBXumw8xOnVQ2GhBm6nQypYW8kY
+roaGwktF6hKekwIXMoPeirSkQ+v3KwtTeAQzxZfjsfYgC28u5qOFiu623S6HgSwv
+4agZgsJjED8JZh/u9u0901NGTB41YqGpvexIc/+XgSt4BLqjsK+VSfIlulvBMb4a
+I+h/8m7cm7nHhPBmjDUCAwEAAaNxMG8wEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYD
VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMDwGCysGAQQBgo4pAgEBBC0xKzAPDAZi
YWNrdXAMBWFkbWluMBgMD3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcN
-AQELBQADggEBALBm9gBCGg6cM/aCjypxlIrRqUVuNAK1jZvZyRFIZhLtLw5FN/+o
-JKzYjqs9gQhlNoFG+/41c59Afu06/Yy3DPi/VKPJ2TZDr4DPCBchSKdjDUEXm50B
-0fHymT/hjhMPsF1tclgwINPCwBeReSKOfXxjSAQyjC7pSQEF3Qr5hYXQEP38+NSW
-eDg+5S0Y1+QDpATYanJCUzBRD4ZRvoy7VsueszMNtygPEQYorE6GnM7CJ5z1b4j4
-az7/LqVAZEOW6sxFT3bPuPtsiMEjbWxXcqbfbeb6WRntGG+YfvSq9Al1SRBAUjcI
-w5xxu9jynyHZyHc1BC1lKP8XY/GFWFG+/nM=
+AQELBQADggEBAB7PsAcFFKW6UqJ/JyztVPV73Id/5d+epWQjZhljKKlTA9PFpmvo
+RGWwszklbOe4K0dZJSBnxhl8QNMo6A90z+Aff73bWbMqjEfaF03AVMv4uM+DpgQj
+LbMhLwcATgGC16tbDOWxbwJs87yonFRgrqOFVxuAnPfUu5hSGfi4aAEsyfMmoMue
+jCnLIuKJm8Ra+KDq0RUxcWYu47C0+vBTjskR9TIMESYXF5ljTpdph6f3ZNIqzNg5
+S7S57Ym+J+RoBo04jlKlewvjHkdiupcvbbJUsnfus1fSQEKPpn/gPYeN6dl1KUWp
+bsC9c+prApS2XSBC160IEuz4Fg3/Hx0UUmM=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9vaxwy4ni/WIB
-odUJjisQdDFlyqgwGd/2QXVVsldwCy5qkTt7CUzYlXk3SxuSzfO21vF32a4pt+6q
-UvpofzVyR6ujIU6uygULh5vPzY6UcCk0ACO4F73eyLRVAj1J+9n7Okt2I9SjMhuq
-FLhvx/FE7B9VzWagg21DDNu6bgBuS+IINIILtD0l0t80fEdC42n5MTdThqmkNoWP
-addHvJGv2D9JAUfwflUkp4JNcVMfs/T9fgzlvbEyd3oI9PJh+YbM3ElyZkKpm4+s
-m0cUInzArxyrzmgfwtuon1dXH5/QkvCT+nYSa1+WONnfrn2Sdilx2WCU5CC0CZQ+
-3R79fOCzAgMBAAECggEABF5qqwo0uZJGAG2efyLT4QQb4xEggEMIQh5NulRp6P2I
-6xE/Al07dL00Vk6XavDtDKe0VaMgfHtstmVEuMU9EM2PqSjqK+Ig7ZwIvWcpQke2
-2yipqeJ6D7glVtO/X4W6BivYcvbuPXe45VimyoQbDRA2xH3/P/DmwiV4pblEr0Vr
-2AdQ9ZsV0t6MekKnQidw27U/UnA3zE8i8PqAfSCIzBHKKntjdRVWJVAs1eVInEDk
-erHzvu6zkMairlIY2YDUmRH+XKpqj1cOWDyrBLKLA8GkY8fbKn42f7MgbuiQBbOS
-1xH6w1U943+LptUm2VP3iczCLucy6letfNtJcn5yUQKBgQDt+vs2ObQ9GP98msYx
-4Ut9nTmv0RzFyGMfeua0GUYtryqmUDBHsy6YbhnXAtazIEPQ0hWF0f4l3EGeJ+kB
-dXvrcSn5YIRKlo6xNOM8yDGOsRLwVjOcEeXHKmopWX0I/5FdIkiMo392Knrw09BM
-WHDtd9bFokbNRZyecVdaSB50qQKBgQDMG53mYjGiaWQJZcbKbdvzSiLI4KWQ+w1g
-SuTyC9ml5MGTf0D6uw5iLhnxg5X1aN/QxbcrfIPpwAmAyP6CjNzurC3p1FZ6vszL
-lkDnSXiHM/kdrEDkbKILLVlK0zk/veeHHsRujfdpCnWdUaB+gU3d3HDhPV1mK6ct
-sj9QBvzn+wKBgDQpqDipi009FnscfcbfKCnfdY6JGnJryvZSfREK3SwUUhfkScDd
-kKAOuhE76Q2YS+UQt3D+p2NrFOLywor0UnY39shXlIe42owTGJ/xDZPGUm1lp6hU
-7/Wo4V4w5Ew9oII2iopxJ/Yht5LkCqtBU3dppiTpvfUuhfbgxxHd7vcJAoGBAJdr
-9ZyxRHllDp4aL3yGPCMl4OB+KNaCKc5CQc+AExwLtcCjK6XXmDgmU4DlSh8iisjr
-pZRRb9u+aE5uzsIzlsRKYFWU/gNaAb0X31a3Hv/PAwWMgSf1n9IC9reYNGShkl6f
-6MLL97yp0c/AevzfGNF7dRLk36GoyQIRtv678Gx5AoGAQsKJ8fvOSgXiRUI31Ers
-99st2+viPOpAgsTLQICJMCkj+Sp//KmRo1YrieYpb3zmIWxhAqQ3HwNcTRq+OKSH
-ix/ABTytekH/56+uvzUcNIzuwbk/GKwoXI6E6TA58mnsh4dd59ZlPTtDicK/Glsf
-Dx0G3fNCJsrG6CrH1EM9n/I=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC7Y8FYr5UsY9L0
+BKlIr10LKO2RnPlepL2URsyVnb/QuSBoYrJNDlEgO3TlaamipaPFQZF15Fqavnfm
+XlOT5pTTnp/08a3ec/qy27hcO5R3D7nTupP9oF3ni3Qj8ZwN42ZF5QBwEakvk/nk
+4NbAhjdnWdqzHhr9c6cwwda8eJMQV7psPMTp1UNhoQZup0MqWFvJGK6GhsJLReoS
+npMCFzKD3oq0pEPr9ysLU3gEM8WX47H2IAtvLuajhYrutt0uh4EsL+GoGYLCYxA/
+CWYf7vbtPdNTRkweNWKhqb3sSHP/l4EreAS6o7CvlUnyJbpbwTG+GiPof/Ju3Ju5
+x4TwZow1AgMBAAECggEATeVS2zXp8dDxQUSqxi83o0r5Lp2tP72FnRytMMipku9R
+3HKiocuAx8BPgIoi3Ryz7myqNfKeA1OH7fhqD8I7PZjj72ODnkRiA3W4toaB2dhs
+q+oUWMsQHg9nUQW/lDpiKk3el7ePt9pwd8dLHi7s2waMFgc+uvsXlfoN9Ly+jXzk
+RK49fi8njY1Bf0acSQLJvaDrSc4rnLipY8/WBRVnWlZY0FRABQ4dsI8bHzxsRgNN
+3G2CT0zveBqY8egodwRkGqde4yYVnz2buerCc+YkA6tbBOB5kcX1IKa8zOH0HzHg
+2ngWFPMcPGC08FROdXS0opAz6ORTFnSHksh6zyoBxQKBgQDc/midtYhuB6zDnAsw
+bnfAPkLRo4f7fFwgsH0IdwzSGstCphV7HWTXi9dJsQo59LrKnEFvjy6OGDe7Vzt/
+d4+/Lg7rs0G5UHaMVEypAQe0bxRF+zvdnIWFC/aksiWxTtXnq0wWVv17VRW+mSSh
+ZNvI/hX+aapHx2E/CZ4DML5GowKBgQDZEqe2qbVbkfiOEXIevyf9JOQ7Qy/j67/w
+8JrWi8w2QiMpLBndYh7eZZIA6nAhWEU3wOc4C0WVn6qa3GXaYcILPb3KQ+sNfo+B
+R+cuXbdTsOIjd3zQ6KAKxzaTay45hbpRCa/BWXrgpSqmUQO9cz5vRNygxcna2mEP
+UghlXSuHRwKBgFoM7PI+s54qUHLQ1hphGTjj5fRoz5NhhGTppblaUYYX1vWMmbo2
+Kw5N9GcZ1hHxsF+5NkzQKmdKZQNYs2zoelGr4faXb9OkfvoFq8s+GTakAL3XdviB
+vEKPsmxAD23lsvIY8gM0ZUvpStgErVF6uLh5GNs3kWR4UR8PuyyPoylDAoGAaMP2
+KdSycOV8fqSw3WII3MZHWMrOfEQ1uQWG1XFXOACnk6FzQkOu/ksrMkHzqEQ/8+6o
+KYZXKSWMY31nswRncKF1zf8FnNeuTwjy7I/SgRPnsJJkrTb4tvr6vh+GicCe8amp
+J7oV8gIFGYwUMAVE5tLnYLjU+UlYetpuru1OqtUCgYBe6k9r9O+VJAV3OJLQSKc0
+OfKTBcsGBAAzSiWQTvkvlyq6J0yK7qgonO+3jY7hlDm3TUqJo8ZGVck5XG7sJNMm
+pjP5B9gFyFmiDcjE0rP/O7H5d89xm6TLihpNtNAr645Oo3zggUvLsks679ij4YRy
+ruwgwxo3b3z8nK/splKurQ==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/not_yet_valid.pem.digest.sha1 b/jstests/libs/not_yet_valid.pem.digest.sha1
index c2b282c0ebd2d..fe10b1ff56073 100644
--- a/jstests/libs/not_yet_valid.pem.digest.sha1
+++ b/jstests/libs/not_yet_valid.pem.digest.sha1
@@ -1 +1 @@
-385D8CBF6B41B08FAE5C0361A9F3E08FB278B991
\ No newline at end of file
+CCE5D2D4BB9F8BB8780BA9A691A27E6C84F5536F
\ No newline at end of file
diff --git a/jstests/libs/not_yet_valid.pem.digest.sha256 b/jstests/libs/not_yet_valid.pem.digest.sha256
index 4c735b56a1078..9d2404eeeebd6 100644
--- a/jstests/libs/not_yet_valid.pem.digest.sha256
+++ b/jstests/libs/not_yet_valid.pem.digest.sha256
@@ -1 +1 @@
-6F8A24651A0D6EA91FD82AE20C96270138E88C0E2D9B7B1C831EF571EC8EFF52
\ No newline at end of file
+0E0165718DF08EAFCE8FB65464E3E00F51EA21C585462747846F25EA185375B0
\ No newline at end of file
diff --git a/jstests/libs/ocsp/ca_ocsp.crt b/jstests/libs/ocsp/ca_ocsp.crt
index 67ba4dac386e4..7b60cf68f20bf 100644
--- a/jstests/libs/ocsp/ca_ocsp.crt
+++ b/jstests/libs/ocsp/ca_ocsp.crt
@@ -1,21 +1,21 @@
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
-ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8
-5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R
-qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r
-5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw
-yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4
-1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku//
-XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK
-n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv
-5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui
-YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL
-+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj
+KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN
+shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5
+WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR
+H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp
+UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9
+LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH
+z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0
+ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX
+AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0
+4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk
+Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog=
-----END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/ca_ocsp.key b/jstests/libs/ocsp/ca_ocsp.key
index 8f2faea5c0988..6b2e18401cdb2 100644
--- a/jstests/libs/ocsp/ca_ocsp.key
+++ b/jstests/libs/ocsp/ca_ocsp.key
@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDSASa6t/0P3MHR
-RhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu85+lX8w27vAz8
-4qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2Rqn7674TGn5eN
-ZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r5FFGfxgceDzN
-6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gwyRlU9JfR00ip
-hro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF41XzEO+BKjUiR
-fYbRTpWRAgMBAAECggEBAM2sFPs/oNe9NBE6mMf5wU36lPZlmW0WQqDFZSYVdECB
-TgU+DhNaqA5gp7OG3e4NVG/xjYX2ZBfeN7YgZobLJgzzR0UE2J2L58mu0JQ546uX
-UrRicezkwLoUjWoC0CnqoXTNT2mB1T8WJD52/oVEeUjxpQ/NdwEmQLmWM3G+mY6P
-JQgxk8CIFVu1P0qajjmT/AjVEwaH/ftQN0rLFomr2UVSJ7XcPD4KzqXTOu5BahIp
-uoIEtc84dzPQIvh43Prjwea313zFKiGVWWoS514PpySi9CPk+Z8N7kY3r6ENHnXP
-MLptUEUAQ9IQsKBdQemF4e4U1YY2o43rxU02K4nwgcUCgYEA6HPo1H4fN6Ox22VZ
-KwzdVLJNojVYvV51C6dLSh9r7TQiomKwbMrfoIN0TUVb38mlhSQrlBCgpgJW+HOf
-U6cO3SdWvaprCcKLpSPWfy30nAW7LfzExNLF6+kLXIhUse1T9nNC3iHnjDP/ZCo+
-/QEJXu0+tkAwv46k/EYYfBzzkycCgYEA50cZ3o4QekPGHWT/9ioGwFjbl94c/9wW
-t5YS3ELeuQS16ZVsQlwH530Jd4Md1JqtL2024a/aMu5BEkV94ZgIu/xq2t0xOgUG
-CEwW8abfEIzXArfCMJBlSV63ir/Rm7ZNrwZNtf+fu5gTtSShNAIOEQ2gQvjLGAlz
-MQ+xgXsRpIcCgYBub6MOUV1DceHt+hiC+3mr44o+plminl29S53ZeVQtPbe2rmxj
-Q387Izj9/RXJHGQm/Sg3EC9Cr5niMAxiEdxd9XKgaXaxDFyL+JrPkWXFOnPTRWO2
-nuypR+6954dVgQuC6v+5ySHY2ltbmzaRvQxWrSA+NgDzATIhKKw5SwftwQKBgQDP
-r8XHWYPqh5oNI3nHsQ3HBQSNaGRaT2YHDrl5GoOvWk+RpyFEpihi9KHftNrA5PXI
-tSannKcxPUsqAUV9pG2TgqTiTee61nAIW2vvLY766b7bLfEwczrBnFDuRDsSva8d
-EWgPm7HxYCuya2ZnscC4B6h2+7xFpZbP4+ve/ollWQKBgQCf+2qZHTIhaJGZwpyh
-hROhQCJimw7ECTkuP1vEu2guhu1QKMCp8yD/Li7S+SdzZp1L4NPbMswJfy1wXkfj
-uyWP2mRmQuWg+RUPFUmvSp3eP6LKBHlmrBEfVI2lBLTaB+5MG84a6r8OaIbJXd6s
-bbDRynzmmyQ7u3wKCA8DfMpHBQ==
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDjKEjkptHYsXzY
+YRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiNshU62bcM+fWE
+NDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5WnK4C369pesQ
+sZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7URH4IZmWqR7A35
+mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9GllbpUwAqUZkf1o10
+Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9LWIzHFLF2xyn
+v9NwOZ9fAgMBAAECggEANiTTAxU0LYdYr6arcnIHpe4n2W2btf+9NevLNjSYBhGw
+ZQOi/ezT5mG6Eu4PNZ4lyHhrEQT12bT2rVVZP3SPH2DuMG+r9TeSqXF3tAUuj0Fy
+1ToqIfUtqT4R3Arxuz9GlUuWzoGG6yNSHT+IoR+3eHBhUMLTolaUVG7yXRDGcYv2
+9yPN+dGx5SBeM/SERLBVZsze855RQNvlmf9yd7HvddviS31JqWdRNNS+oXSecLfk
+FuWsUOd+Bhg+0s1cYFOw1UcwA4tytXcpuwtsNbe5hVOR+aV1DphMOU5YjDzPzwcD
+MH6b/3y6vEqk3tyY8fBd+6USrE/7UiWKEKiOu7n18QKBgQD4ndfS4W/pzGP0Wo0W
+DjedsrdNgF1ZwVh1Uv/94D78uYcUEgKwBxUWXJGW8H6QeHytzUBEAPMZbpeyoY78
+X7vdIqgoWt+A865BeCL6LoAbz2AQbK+jh9mj2MDr3dF7rwzc7TXLa0YgetrIjDCv
+Zpr18EWr9RfyiYTJfRqJv9e4CQKBgQDp50szNC4OEAJdh/H3rKTC70ICNpb0602g
+pmFyllEqvWo7eFT1GDfawGfS7Wc85CWVbNMx7h65PLFxLQsCecXCJZZiwPvVQsqm
+xXYA7ClCvGEvXglct0ZJvMk+UHgjusT8YDmPwGXNuu3y31pUktQvD0fOmu1sDKqO
+hXmn8wFmJwKBgQDzb0LD10AI0rxQqFWbcgJdJIA9n+JIH3xpLQNB6FBkj5lHOrds
++xJX2f4oqIWVGpUibWZu7+sZTOO24mCql2bRCb1T/l742iIXMvY0uFFyaaTrkDCO
+1Fd0Cqokigczr1zN+msBKqH47bLKShLlvHchN8pQrOVs8+CUli3lEXa9WQKBgQDW
+mFSe+q2SS0/QPexaPjO/gME/xJPEyqkizeMzvQNkwflX7HONlhWQhdv7YcHI1MxC
+hmBRO9VGP4/QdPHX6J7uG4wYuFOT+j5wuVMlT4YfazOCwLS4MpGzDxhXKn6+0Rjv
+Lt1ArNT55hlvLUnzs+4l6tAHlo5jBk/oiD7wPnu3GwKBgQCsPSbbYcC2H+xHNkPW
+cK4cisQQAeNJXLtZUElw/OePDTMgo2hMnvKkep3LI3WqwJ208Uo8DOW5E+t1GC8I
+61ENw2rMf1z8uaXLj5IbJPAGsgYvEef7kNthvtVl3XkTRZ8K4Sw+MGWnSpMOcv/n
+AKUlIVYnLie1Kwnu1xD6LRV40w==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/ca_ocsp.pem b/jstests/libs/ocsp/ca_ocsp.pem
index 9f57d6aa4cca3..322c9bb255d5d 100644
--- a/jstests/libs/ocsp/ca_ocsp.pem
+++ b/jstests/libs/ocsp/ca_ocsp.pem
@@ -1,49 +1,49 @@
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
-ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8
-5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R
-qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r
-5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw
-yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4
-1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku//
-XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK
-n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv
-5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui
-YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL
-+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj
+KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN
+shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5
+WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR
+H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp
+UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9
+LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH
+z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0
+ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX
+AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0
+4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk
+Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDSASa6t/0P3MHR
-RhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu85+lX8w27vAz8
-4qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2Rqn7674TGn5eN
-ZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r5FFGfxgceDzN
-6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gwyRlU9JfR00ip
-hro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF41XzEO+BKjUiR
-fYbRTpWRAgMBAAECggEBAM2sFPs/oNe9NBE6mMf5wU36lPZlmW0WQqDFZSYVdECB
-TgU+DhNaqA5gp7OG3e4NVG/xjYX2ZBfeN7YgZobLJgzzR0UE2J2L58mu0JQ546uX
-UrRicezkwLoUjWoC0CnqoXTNT2mB1T8WJD52/oVEeUjxpQ/NdwEmQLmWM3G+mY6P
-JQgxk8CIFVu1P0qajjmT/AjVEwaH/ftQN0rLFomr2UVSJ7XcPD4KzqXTOu5BahIp
-uoIEtc84dzPQIvh43Prjwea313zFKiGVWWoS514PpySi9CPk+Z8N7kY3r6ENHnXP
-MLptUEUAQ9IQsKBdQemF4e4U1YY2o43rxU02K4nwgcUCgYEA6HPo1H4fN6Ox22VZ
-KwzdVLJNojVYvV51C6dLSh9r7TQiomKwbMrfoIN0TUVb38mlhSQrlBCgpgJW+HOf
-U6cO3SdWvaprCcKLpSPWfy30nAW7LfzExNLF6+kLXIhUse1T9nNC3iHnjDP/ZCo+
-/QEJXu0+tkAwv46k/EYYfBzzkycCgYEA50cZ3o4QekPGHWT/9ioGwFjbl94c/9wW
-t5YS3ELeuQS16ZVsQlwH530Jd4Md1JqtL2024a/aMu5BEkV94ZgIu/xq2t0xOgUG
-CEwW8abfEIzXArfCMJBlSV63ir/Rm7ZNrwZNtf+fu5gTtSShNAIOEQ2gQvjLGAlz
-MQ+xgXsRpIcCgYBub6MOUV1DceHt+hiC+3mr44o+plminl29S53ZeVQtPbe2rmxj
-Q387Izj9/RXJHGQm/Sg3EC9Cr5niMAxiEdxd9XKgaXaxDFyL+JrPkWXFOnPTRWO2
-nuypR+6954dVgQuC6v+5ySHY2ltbmzaRvQxWrSA+NgDzATIhKKw5SwftwQKBgQDP
-r8XHWYPqh5oNI3nHsQ3HBQSNaGRaT2YHDrl5GoOvWk+RpyFEpihi9KHftNrA5PXI
-tSannKcxPUsqAUV9pG2TgqTiTee61nAIW2vvLY766b7bLfEwczrBnFDuRDsSva8d
-EWgPm7HxYCuya2ZnscC4B6h2+7xFpZbP4+ve/ollWQKBgQCf+2qZHTIhaJGZwpyh
-hROhQCJimw7ECTkuP1vEu2guhu1QKMCp8yD/Li7S+SdzZp1L4NPbMswJfy1wXkfj
-uyWP2mRmQuWg+RUPFUmvSp3eP6LKBHlmrBEfVI2lBLTaB+5MG84a6r8OaIbJXd6s
-bbDRynzmmyQ7u3wKCA8DfMpHBQ==
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDjKEjkptHYsXzY
+YRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiNshU62bcM+fWE
+NDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5WnK4C369pesQ
+sZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7URH4IZmWqR7A35
+mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9GllbpUwAqUZkf1o10
+Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9LWIzHFLF2xyn
+v9NwOZ9fAgMBAAECggEANiTTAxU0LYdYr6arcnIHpe4n2W2btf+9NevLNjSYBhGw
+ZQOi/ezT5mG6Eu4PNZ4lyHhrEQT12bT2rVVZP3SPH2DuMG+r9TeSqXF3tAUuj0Fy
+1ToqIfUtqT4R3Arxuz9GlUuWzoGG6yNSHT+IoR+3eHBhUMLTolaUVG7yXRDGcYv2
+9yPN+dGx5SBeM/SERLBVZsze855RQNvlmf9yd7HvddviS31JqWdRNNS+oXSecLfk
+FuWsUOd+Bhg+0s1cYFOw1UcwA4tytXcpuwtsNbe5hVOR+aV1DphMOU5YjDzPzwcD
+MH6b/3y6vEqk3tyY8fBd+6USrE/7UiWKEKiOu7n18QKBgQD4ndfS4W/pzGP0Wo0W
+DjedsrdNgF1ZwVh1Uv/94D78uYcUEgKwBxUWXJGW8H6QeHytzUBEAPMZbpeyoY78
+X7vdIqgoWt+A865BeCL6LoAbz2AQbK+jh9mj2MDr3dF7rwzc7TXLa0YgetrIjDCv
+Zpr18EWr9RfyiYTJfRqJv9e4CQKBgQDp50szNC4OEAJdh/H3rKTC70ICNpb0602g
+pmFyllEqvWo7eFT1GDfawGfS7Wc85CWVbNMx7h65PLFxLQsCecXCJZZiwPvVQsqm
+xXYA7ClCvGEvXglct0ZJvMk+UHgjusT8YDmPwGXNuu3y31pUktQvD0fOmu1sDKqO
+hXmn8wFmJwKBgQDzb0LD10AI0rxQqFWbcgJdJIA9n+JIH3xpLQNB6FBkj5lHOrds
++xJX2f4oqIWVGpUibWZu7+sZTOO24mCql2bRCb1T/l742iIXMvY0uFFyaaTrkDCO
+1Fd0Cqokigczr1zN+msBKqH47bLKShLlvHchN8pQrOVs8+CUli3lEXa9WQKBgQDW
+mFSe+q2SS0/QPexaPjO/gME/xJPEyqkizeMzvQNkwflX7HONlhWQhdv7YcHI1MxC
+hmBRO9VGP4/QdPHX6J7uG4wYuFOT+j5wuVMlT4YfazOCwLS4MpGzDxhXKn6+0Rjv
+Lt1ArNT55hlvLUnzs+4l6tAHlo5jBk/oiD7wPnu3GwKBgQCsPSbbYcC2H+xHNkPW
+cK4cisQQAeNJXLtZUElw/OePDTMgo2hMnvKkep3LI3WqwJ208Uo8DOW5E+t1GC8I
+61ENw2rMf1z8uaXLj5IbJPAGsgYvEef7kNthvtVl3XkTRZ8K4Sw+MGWnSpMOcv/n
+AKUlIVYnLie1Kwnu1xD6LRV40w==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1
index 112d057d8a9e4..1382923a4b93e 100644
--- a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1
@@ -1 +1 @@
-BF8C557A0367B29F817643DB7F1786C8F9E0EB29
\ No newline at end of file
+D7AAF4260C327731CEC6EAB5F679CFEE8C560A82
\ No newline at end of file
diff --git a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256
index 508eb3bfa2d45..fdb0aedc04849 100644
--- a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256
@@ -1 +1 @@
-D783E2DACE3BEA1AD49CDAE84E1F1CB54ED79C97A76DCA8C19FCC4B60A6C3CD4
\ No newline at end of file
+C82EE66C7DE8A0B9E7140C5BD0084BE4F52147F121A5B958B327148849A6557E
\ No newline at end of file
diff --git a/jstests/libs/ocsp/client_ocsp.pem b/jstests/libs/ocsp/client_ocsp.pem
index e7ffb1b5126e2..510f0aec28bc3 100644
--- a/jstests/libs/ocsp/client_ocsp.pem
+++ b/jstests/libs/ocsp/client_ocsp.pem
@@ -1,52 +1,52 @@
-----BEGIN CERTIFICATE-----
-MIID+jCCAuKgAwIBAgIENMutCTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID+jCCAuKgAwIBAgIEYUoivDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTIwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLCW7KuORvLi/8ZPwbAE3/skF9
-JqAMfShWEPVpDmlyabiR9B+DkKps0e+tF/T82ZPzKyLzCQJoZCtxFLxlvEx2OnOW
-5v89ZMDackJf5UmGP/oeZYWsCAHmu86x6sXjrmTfXtO6e7pf+GIyeujVrxb0fTZs
-6TYKVLlpr5GFWslsaLaEUV8cDTQQj0WvlYX3rgKeQ1jd3B6qn9X1CU3W5Grjg/D8
-oRjygSiTEJE22tJ/IlVDulGTgAZBXewmO5TZsHC7+EJfG2HdmQJsgjNJm81W5GAq
-PEmsHlcZ/jlKa4fMLw71a1UEFvFaTA1QWr+4StVTU1sk7qdzjoCTkkvVjXohAgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDP+anHp2PcAacpG70/Xc49/EbZ
+j1n1zOi+rAyh/Hrws52uhLClhkQ6qzaaZlB/wOG84YZvBTtP6L+ZHB9rysnGEIzx
+Zstw4Wg52CRs+D3VPK+FYpAAMQsdiNQn9YuBt/DNYuDeYuStrY+gFFmFR674Cj+z
+hexFHiBIi0wtQmv5Lx0B1SGnVRAkG38aEUYVtDDZgSKFFCHHKga7q6MemHiuvqvG
+vxRyEtPaSTA5OnKBxRT+tCPhQWGR8p4X1PwQRSTw1LpP6ZYpKtIMdImDbEWIH6ys
+PCOxp5ir07uUAvoclgmjzwCy4f1LI0KyL3IgtHtcKPjx1T6P8bhfSekAVRabAgMB
AAGjgaUwgaIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYB
-BQUHAwIwHQYDVR0OBBYEFAzQKqv6kSKcIz85diU9ncsO9yzRMDgGCCsGAQUFBwEB
+BQUHAwIwHQYDVR0OBBYEFKZns1Tv+GB2VXC/nOoZWLULWSITMDgGCCsGAQUFBwEB
BCwwKjAoBggrBgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAa
-BgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAAbl
-SGWOBNAd7R3aGyPQiqqOnIUkYO/ugcleM9DZMoyuL9v/ourpSwY1CkRFHhXO2iDp
-wjIKNJVVdWs88Iju/tQrn1Y8oTMUvzVCFLL91Iaa4odHhx34HlvFBjQ4EPmq0v41
-Cd+hYe42UDPm+fG3t9o7UZ51G8bkgjzKFSfofSR0TYpEhwIOGya/ccjTPpy4gGAn
-A0uWQxdAniu/lJ16bVapQD2WZGVhfOQpMTg+40uM87IY8dz9+eXq6/s9lvw0imt3
-3G4bgFv7VlCTMX+Zwa6Y8a729/mfORpEfB9DETgvrypvFVGLJmddG67tuFCmEFzx
-pOtpcwwSe3LjQyPx880=
+BgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAHEV
+fJbRizVql6i3JXqZmXvCZKFX1UmtekVj1SbKSkE6gDOYbMMGm7wZgei8/eUC7JZ7
+DCHkkLQGsbHEq1IqgB/2unXiX54GAkxMOBa2sdAYZ/qGBryV7GziZ/pffOMBdu80
+osD5RoCcrdupWvp/hpLYg2cqMQ57uo3F67jrk3JQZyol7zEmzhGN1vGTmntoZI3s
+rPd/7cNtspz1pWnlAMUQWMJDsSyiYh02luSnWWkrj9iF3AgHuHcwIBf1obNyq2kb
+gFr100o3hFHC0LO/BLk1Bno5ekHlGNEXpH+CwOK0qrfsmOhCaxgozICiMEvwr0n6
+nJh0bQjM5FSl7ihIJzQ=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDLCW7KuORvLi/8
-ZPwbAE3/skF9JqAMfShWEPVpDmlyabiR9B+DkKps0e+tF/T82ZPzKyLzCQJoZCtx
-FLxlvEx2OnOW5v89ZMDackJf5UmGP/oeZYWsCAHmu86x6sXjrmTfXtO6e7pf+GIy
-eujVrxb0fTZs6TYKVLlpr5GFWslsaLaEUV8cDTQQj0WvlYX3rgKeQ1jd3B6qn9X1
-CU3W5Grjg/D8oRjygSiTEJE22tJ/IlVDulGTgAZBXewmO5TZsHC7+EJfG2HdmQJs
-gjNJm81W5GAqPEmsHlcZ/jlKa4fMLw71a1UEFvFaTA1QWr+4StVTU1sk7qdzjoCT
-kkvVjXohAgMBAAECggEBAKWy/A1L2Xn2+siD7c9nb0CHTLUDicjHd9zNmEr1rQ7n
-wu6yiemIbd3Dc1QTYifctcbVZH9t0d46Kc3uu1ZX1xsB1wXHXXTQPifI6exzJo7+
-tXSLLA1at5qDmUtQK9IFqGM3c5oUkeTyw8koz85cR2KesYUuyqChxnO/CgcIMaxb
-UKD+4OUoqoaWHv4XIKg5mjuchHuQRXqWhNihSNBte8UV5AR+earQRUtRlo2VLYrW
-5Ra0GPRZD/561gvNCEPKAZO0OFU7UDGbH5hFPaKe9l/5nlQiZ9yX/aovkNPcBkOI
-+EuwvvmQQWxkPFj27SZqpJADqR1vL4/blCBJ0lQMwPECgYEA6fmLewEBkYV4B1Zn
-ubdpliVHwfLM0zCuzBHNrnHpRaTHXHF+3BWfhT+etOQJu4tviOERIX8/rhW6lti+
-DGttAIV+PG2Z/h58qdRYyps96HTwwGFZD7eUA71zdOaTdcOug5+IZ3yyio5ep7SB
-e6uwSaCIQWdf3vbrrvFDC1fdF00CgYEA3iZUBfvIaV+c6QZyEnX9DAG2QhuYS6FS
-7/WEKvUVWtSsASZL6XzBs+OKINSMXvGl095SNhCTSATCTazIPkzLs0WAiY1z0+gH
-LgDSW/vGMIM9FsfCYJ6jxNJuAXPl9b9tXbQlA97JJ7RALcKfNg6WFIEubo4IQcYT
-40g1+hHHjCUCgYEAkMaAsBPnTIwxwRiOBjSfePCIflImu2ccQdmiU5bYqOdVFLaF
-sNkQY/sB0RxpiUq2MRRS7U83sBhx5HUo47Z1NCVl/y4yYzOqH8vtRTDaFcSGVAPo
-f/kv9UB8+JDUHcJ/caJ9XMukDmgZ2duvYz7RTj2vEglNIUHYg991hMdRXuECgYEA
-2RbDjYVbd/4DrISr/PZobs9NeJTW4zGnhu96VTBjyfuCiy0NigJsmIqHdUXAVxf2
-YXElDchs5TghWR68IIWTmoEU91bQFgjs2zvaViFYsBfQHu7fOOROTg0Fi03jL5/+
-FE/yzDFuiepPvWgvhfgDGc6TvV33G6+hx73NYa72PjECgYAzNDqiUGcKkbPi9iCi
-FcfoPlu9QrSYFo/+/1towWg+xD/AydzR9yFDtlh0D7XhU5hvM38BuNH3oWRXLCSj
-Ul4gUBPQ9aLXUbP0qxPvaIGYofqLqFbzFgUNmYK5J8o3g394DDgwBwEr3HyUh5n5
-rriGBbcgH2n6JyCIogymNPowMA==
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDP+anHp2PcAacp
+G70/Xc49/EbZj1n1zOi+rAyh/Hrws52uhLClhkQ6qzaaZlB/wOG84YZvBTtP6L+Z
+HB9rysnGEIzxZstw4Wg52CRs+D3VPK+FYpAAMQsdiNQn9YuBt/DNYuDeYuStrY+g
+FFmFR674Cj+zhexFHiBIi0wtQmv5Lx0B1SGnVRAkG38aEUYVtDDZgSKFFCHHKga7
+q6MemHiuvqvGvxRyEtPaSTA5OnKBxRT+tCPhQWGR8p4X1PwQRSTw1LpP6ZYpKtIM
+dImDbEWIH6ysPCOxp5ir07uUAvoclgmjzwCy4f1LI0KyL3IgtHtcKPjx1T6P8bhf
+SekAVRabAgMBAAECggEAZ3pxX0WKCg1IfsuufbjOxUigd987g3FQ3I3+OxrEkL7N
+1edvfOwxRn+py2jc5LOznacYnGSd/kRwEF5Nw/RCMKpLdlEPKDr/AMjqVs2s9kwb
+iOJhg7bjMO7DoJ4rq3858Pchq4TJAyE4NQ3q/MWN6RwwVcJsrHcw2Od2H47M6k4B
+CoLtyuIzpN9vQtQydR3XwaMNAKBV/DjviSJrqZw3xt7qKHnRCVrn9d1liZm/L0mq
+0B4hwhtfnUNn7AaV5AGj7Pd9oa6lUt8Jdc2jTE0vV6bTr/Yr1YwB9Frkh1pZhZ+a
+1xWVQVofT6uoMokK1uGcU9zFYUQ6ipP+Dod9Ni6VAQKBgQDtjP+jU7J0BkDv2+Ve
+fchvLBKvpOIYicnWxtyc1axlX28iZXxa77o9PTF9zLS6pkGf1xJtkg3gRusMTSTY
+67GtHsvnnGug7xYaJvMVnEbleOGjEfdg4Cn4noYrzwtk6nfqJiUaPHjMn3hQtD2l
+XqYqNAQmU4IGmwQm+IBUX1GSnwKBgQDgIKRQPmgV5/EDCmRDeCNthUaj1gqqzjdl
+c4mF0EWAUWTB2RCHvcsDAwxtiGoDqDGaouns0t3Sv5M1+Q0Te9F1cdnCrR1ujrXI
+kKkzYp7AfhxIu5q2qt2pF+EWaMaWM22Byynt0byXmL1ZQqUyQRQyFlxlmfqRh0dw
+eJS7Fb3WhQKBgCnRZXtKE6MrKqlO65zrxaoUdyGmhArPztFZZqLRVqzt1PGYLgEQ
+LJf1Os4riDFHbUwHH35cdMbjlSMwPWlWbIvOXRTjy2qkTB7Edox6W1ywIACF/Cd4
+rsrOlU4G/7l/MSbuGh/5P0a+V/7VC/kdpWDOkWBx2PTyuxNlMaoosYJ/AoGBALhZ
+4Q2n51FtcI6Bs6rTXMCvCUWlvyxzZplqCUop36EGUgwokTI16JBXp5dIx3quk+6P
+5j3HzHGW5m1Up27JP6aMuQMqBbV0aQ3J/4KmsdG/aWBh/4YU0TJO0/PsxruxHXfP
+NnPJOoivN7/904echBIW1nUzzCh7z0UxyZSA859JAoGAXKXvh8s3n7o323yQN0R7
+ouyg1Qk8jYIKAwlUsaPXMBX711Cptx2/qtLRwQpos+JgqBD5xOg3LT/DOqYL+Ip/
+vMCn0KeX2crEstsJFqZ28YzAXoZBG3DIwH3a44wutL+xfYj4IdHWq1hLDqIg5tyo
+6MClqNMZRz6UFl0iotzfHFg=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/client_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/client_ocsp.pem.digest.sha1
index 0a34e970c45be..2771aa053d498 100644
--- a/jstests/libs/ocsp/client_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/client_ocsp.pem.digest.sha1
@@ -1 +1 @@
-22CB52597C604163DEC4C7CA6075857B65BC0A38
\ No newline at end of file
+167A47262BB5612FA24D8199A6549BE003F39576
\ No newline at end of file
diff --git a/jstests/libs/ocsp/client_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/client_ocsp.pem.digest.sha256
index 2647408b974ec..661252b28b4cb 100644
--- a/jstests/libs/ocsp/client_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/client_ocsp.pem.digest.sha256
@@ -1 +1 @@
-BF7E7B303E250F8043BCE5DF152A9CE69EA3C654DC2A3F6C6A08358EB62C37ED
\ No newline at end of file
+140FD8E16CDF21418853EB2BD04A052EEE2891E0CD147A0908D9974E1E70C459
\ No newline at end of file
diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.crt b/jstests/libs/ocsp/intermediate_ca_ocsp.crt
deleted file mode 100644
index a4193f87c9343..0000000000000
--- a/jstests/libs/ocsp/intermediate_ca_ocsp.crt
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
-SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0
-PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4
-kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve
-cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg
-92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH
-NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF
-AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8
-4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q
-yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4
-g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj
-J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm
-pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA==
------END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.key b/jstests/libs/ocsp/intermediate_ca_ocsp.key
deleted file mode 100644
index 8d2c2725ee846..0000000000000
--- a/jstests/libs/ocsp/intermediate_ca_ocsp.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCk/dRBOVlVgWPO
-9FEQlt2pgHDXMX41ZrJ34AVSpbOtbflMQLGeZHQ8Sv4gsS5w5+I8H8DBGhMIwXQg
-mTXLqXaWPEaz2+4YP8x0G2fHMndxVvM9O1WblDiQCZUnPs77ZzDvRcYPeGu4Mwoa
-QCLFHAtenXoQ2XSPInpXjgkFF85BxksbeB4mC95xunUrCRMBJpFxi/73e2v3Pmz3
-Zr/ZRVdG5Ir9F/X7H5Tj4ZFUmHj+TyL2c/+J5iD3Y4aqFavURN1K/rpJn6wd37g5
-+0xsROoF2JMaPsYz4SzMARRM9gNptO9CU4G11kc1g4scYzErF0SPkTLMTjLxY7vK
-Thb5Nki3AgMBAAECggEAASkb7h2GKFjRp+oGC/TTuFaD9K+PcLa5OKilwPATdHva
-jhPCbBfOzYHFidtVNUwcRkn+5BzX127s7zHEtBsMD4B7CtbYNOl1+bcbosYTGwP+
-kAaz0nVXdIPsvarub8xJBtXZz9AMCe6p+odK91H8Ln0zF50/+aXHcIg6PgPt2n6U
-smChi15o1F6kdr+hwrqUpjW7NDN3Fs5lCH4dNw8I5PvpqPwl3IkwYG8e76A/9dJa
-Fe1mzrUcmXi57JwSePE+Q7/ncIfXYB964AkTMLabylaPsB5EKP587jfpEfXXfyXn
-Y+MLFCfP8dUXwu2nAr6vSWs3Ne4TGwWLLKGSP1UQuQKBgQDRBrQj75aN4hPulr9j
-MTLIXxNRBOEkKXv11mvZFEV1ljyyw3qCivIndJBLNLRDsY+cr6yOYVwvfF5sx6Si
-sF4N789yusRQr3iARJ67+fIJ04gOaIMW8iYzB9kr9eaLdpWSbbBkVG44aF28CiDb
-dgeEFFjXYY5u4T+V+YJPLuDrLQKBgQDKEc6SXndtATpU8Gq5DWcUthPwEVQmVYsF
-6EGWtU/fdVgTs1XmkFuRLy4VZcICK8+uGqN+bOMtr5gKJjEhAr2aDMqpm3keXdLz
-Xlf/El2zzQ1Pj+Jm69odeCqGHwXGQTMOF5bqvIngWi1A5ijS/N3BiNLwtzlcKm+P
-yJuJF+dh8wKBgQC7Nd7XpLlaIFcrxLZrl9/c2FKLqOwgoEsW9tGnHmHLnCCHF089
-ZkbWEa8+vFiLnJd8hVbuOsL/AMvtb63DzGSg5N0O67nybgZmE4972rPuGxfrl615
-Oq393JSkq9utoyr5d+aZJYmGWetCBGxDQuYeZL7hQM35/yIdJ9iPJPRrjQKBgCac
-rndjm7h1kprmcc44lGjtvfOSrBzDHdScI+RTcxbFCnaBPznWfdjJRioKjr7xdjbT
-mkgvMF3rfsb5s0uWhXppVVSBg+xci1G7xl7UOJmB5jg8y0tVaBFXg/Cq/uR6UvIv
-acQjEMmREbKkCEsAzLMNnRkoOcq1xSmZcLcKnUknAoGBAJjGDcvr/RsrP/fehxxs
-jqtxALg26wAUbfvgjrJxZ3sSmXvP++t8KcXHoAi9vbUXwHjdcq1GCiynmuJG/D4z
-u7oBsQnducfSTULsmdMIjnBTy6cdcilfgfX+3h/eUEDzF2R0vx3ugmJMUW4+iMm8
-CVLNHOr0uNpdrz5tOf6SpRhd
------END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_ocsp.pem
deleted file mode 100644
index 5c77a6e454a79..0000000000000
--- a/jstests/libs/ocsp/intermediate_ca_ocsp.pem
+++ /dev/null
@@ -1,44 +0,0 @@
-
------BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
-SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0
-PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4
-kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve
-cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg
-92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH
-NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF
-AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8
-4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q
-yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4
-g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj
-J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm
-pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA==
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEBdhiWzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjAwMzIzMjIxMzA5WhcNNDAwMzI1MjIxMzA5WjB0MQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCg
-H42hLFFnWFETIDs4Q3rjzJLB4mxqn7BiFDbhzivKGN8SMrIaoyg8CkNJWpJVYEBN
-BjaQHMzivBiQEjDbx2bWz7+rMjont9zJbNmMMuEZcqQw42SBlQ/xXBnIbvICGoXy
-7EkEH/kYzX7NjUhAHOJUdfyTW0okChPxOQr8CI07HVYmeelBZh6FPnzdQ5mgsbmk
-vsdesE1gvcfFtm/7Q6+GXp+1GDVGRUmPmHTYPIkjouJWQM++WU2KofSe5k9Rn1Oz
-ZE3jJAaB9gGA83/xcLkVLBe4dyE5foVbbXL7t37yB8R06/7ffV62B7sn0M5X/rfA
-UY5sJ6WOWdQz8k+WjXlXAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAAsY/vktUSwXC1MCC8cYtcrlI0EgGcvkcRxEjRv7t5YVZii6
-eqKSfaX5HDxKl8dH7Z95Z3sDqr7iwPFtzmzQHEwvSSKbiqeS9Be0yf6mJv10LC5d
-M9qoMvbp90ob3Jhib5IGzeijcQFfzbZa+MGnWiCGX04U/hUrayMdmna83exKbeNW
-S0LT1F82rG2QklFOFSZSInXsBiR4olRWqXrYpNjP4B5gueQ2+XUlMZdphvkOksCo
-/UBdqKotBFgyYXdMygl4hscxo+O4FRpX6RKVyobJXKax+mzbc9YUKTFtKu6KlZls
-jvqjtuXgmZvXOgduG5D8Sqoqp/q1nYzYpcgEss4=
------END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt b/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt
index dd618b0452bdf..8646678566997 100644
--- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt
+++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt
@@ -1,22 +1,22 @@
-----BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j
-PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7
-cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81
-yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw
-EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM
-H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF
-AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+
-QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci
-6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw
-yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei
-NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P
-SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA==
+MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv
+c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52
+pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9
+Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM
+x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY
+UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF
+AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa
+REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5
+gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU
+iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE
+EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D
+tEOIfINSAtpVNmi5I16idsVixhy34A==
-----END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.key b/jstests/libs/ocsp/intermediate_ca_only_ocsp.key
index 31d77afb2d249..558d745a58cee 100644
--- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.key
+++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.key
@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUZmU+DnjwZCJ0
-BIKtFkNd8mbcuL68hrOGbgwaG/psnaj5Jgd+7mM87nVPmXRMa+6gk+fPCr5GVblF
-bf/58IyXSC35rAMPOMoJ6SfTTDwpKTeuoYNhCTtxJW6g8lXIkn++jzpvBUwH1NjL
-E+LWUQF57JVprYW9vNL5JmrZz1zTztZVhGaP/zXIbO1QDCPZmiu7UQK6EcTxgx7I
-OqaLnZcHJSYjtjCvOzjFzelD4AjrYiyJQVsI2rAQ8eERwKATBpSVaKG9R87mUlIp
-5TKfRaVDwyadROcIIxgMMko+5uWy67s2CXrGYQwfe72JJtXqpZDCwLg8lzDk87fT
-JPlCQV7XAgMBAAECggEAS9I9w/hgndfxIJ9XkrhG1iupIpPkquIfBhcUxOFF9S4W
-2tIDjQFGdcLeJ8ss4/cHmVUTRCqen/cMC0foP+3qEjsXBnCsKuvn27/akqg9Nahc
-Ez/e6W3lRU/KfTPlAZOifIEep/EpIgaOWXhA2qbSMxcMg0wJWSEl4wEe1aAbkBQ4
-lZ+i/sj/vhceaDDBHTrsC3TcCm41eREHBFnRbr/ga3REAmKah0s48aeVp6cgDbaB
-4VLoT2KGhXlHFTQsUysM6W7xm35RRarbtiJfqH83ZQTepWNsrJ6du/eY/UfKnsfH
-fP/ppGtrktENRsmAesEjz+EwQPpsZLayxFdGZ6O2AQKBgQDtrC9CsfznaWdPtLey
-H02JXS+roPJWXRL6ND1u9qA2rg+/12XsqgIofz7sFU7Xm3334wVqwNIaLqm7tIxM
-8R3LzwPz5SJnNTWbILmckolZ+98r1AHlfwM/6zhj9krWE2vYlu16vI4wpP6taxb8
-c2wscUcMcTjPhfQGbF78VlW7AQKBgQDkx1AIvHLKE0S6Apexjjj3sXgsroo+Kssp
-aRinANuNHDPyLhVoKiVJsTs1soZ44Ls1YmSVRPMVoHQxxIKDnW9oui7aOOwoChhg
-qqt5N7gVF0fxcD5gaut9zY7LdDT70mC6nj2TYCjMxObVDTDkl2kHg77Ct5aQ6i+/
-SX2jjhZR1wKBgQCCTiO/IkxxJ3XP2lnYW5csF54aL+yNcwwDh36jBq5CLF5QJK0p
-+u+h+lmqH+iRBxwiOyJuK/dFCY5fbhZ53LwkUIRvRJH/GcohGekJscGCRzhNFDhR
-9bf3ZGQPUiongpBfOChNYXFntB45P/xjPnjxSM3WQYEPLfbjSqktGbx5AQKBgQCN
-wzvCzhNLSHlT0ftxmLGQOrY+6cDcGORfOrJ9+bgSTqGZthipvUPx0BLiiUdua8NV
-pyywmlMO2ahmlmBRciLxAD28F32uqeLFM7yhlndJCm4YG/drA5X9FH0hcVbLnGc3
-/IonYnSlvnVTG6NqYrBAadCxE9YG4jbID5/80zTdbQKBgB/30lNgWCXNKs/RVsMU
-npTVDESCwaU19wTlbOeVS3vCFDapJmuv7K7uhi1b67Pt5wnaw3drKNddIOS53XZp
-yfsYjZWml8z46qyA5kK4scQ1LkYPnj4Zf+q9dF9i1mYGT2b65v/dzfrLr7//hcBZ
-23zayhJpJWDJqbUWliaDmY98
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCet5b8bT4hofmX
+L1wjTTPC8cSzmNRz54XwH9haLsT9qsIhGA6GfC9z2kEOhWqbDH1xO0Qj8olbjfVX
+wtMG7jROgMG2skJFTJ0X50CejG/mfVOUIXS3/nanIYzlF/crmd8kaNKk0+fDfKiO
+lcq4Aoll1DGU8G2eyYI2Tz4K2RMTeb+mwsgZr/1GrcK3HTF90lCKJqNXwq/TxrcI
+NpZbZOPh1a6RQjosUV84pd7oW9zWx9M8bugf4YzH97u4FsJw/onnfrymiyAt11mJ
+9CO/1tCgbHQAHXlyX0/PIjo+VpXifDDUc3B3vJhTNY1V05DYs+RMRG7RbAbv+UQT
+ovnqnT3bAgMBAAECggEBAIql0g2c2KPEvSXmx1RHQqpPTQeewCin3YcQKo1NQeRe
+YMtMGpQ8s68+v4oR3jinxoU9wp6sZnVGI0aQ+IubVrIlJBOW0PF8pdaVuwtFnxL3
+9CT4vN9mOPxzAIO4VcEO37EMqMY4HsPUh+JW1Am+nYwIu29iQEBvw4Hx4z7StilR
+13pCl+GrZ+c/+64SRKDt5vlnPRElm30410nCRR37TvdRqmWqfrcph7mZ7RRQTtE6
+UPAU100eR+RT/U7lLb3xzxFOtUZ1dZXCMobM/lzDHXpKb7zrBwcg4ugo8GhOkjlN
+UpV+b/pvHNsViqpk3Rp0ZznIl2j/iLYtO+eZ1T45qnECgYEAy61VFS0xcGHLHgQE
+YQYrA6res/RTHWnstnZDW9KJ6yPFmKS8/Dh4WS30ORP0bNuCsn0UUPOp4C/njrPF
+JjcpohyqFZK/0OmW+oomEJWbH1Rdv9NaViVfjDcydeV1YI+bGKT2LCTjVOvk7oT+
+Qwvndzk5ToYBCBZNTs0MqjboOCUCgYEAx32AeFp48eMWlyZ93qVgvRYFhUQtHKd8
+ofKzMPJCJT27zAUg1arDN04S5LepyoioTr/rFIMlxjawl5t3VsJA8/oYvmn3xnKW
+WZxnMhd9kTz/VtZU2acOzCXCUKAJYQDITemshC7vzayK7m2+i65nF3ygySmbun1Q
+sStWmtxSvf8CgYA6gTKEX7PozR50I8FCQWxPpGBOHqtVMpz9GGFm2cYmFeNnYkKq
+zGm9bBbP4mx+bFtRe94/Fo7AR+Jiuj6yKHw8BbHRepLApqholRA0CVVCnZBETqiP
+QDL+6sgC14Ns1jM90/ymV345YX0kCm2iwqOdOoG8jX90XDuenAW8SwNPRQKBgCar
+jVvHLeq1uotSStP9+uKsgiVzMct8LAy4n0O9slTFwsGpvNOuZmkyGTd84sz/8KBT
+U56GTaw+rby1xkxrG2wv7tv6bq1pSPvV9asgIpck29pZcdk/KP8p0qEOZMMJA8sW
+oPp+nr2f1M6jN2ycQh+raP+VsTpVUG8Hl6ItU1zBAoGActnprdCobGUplY/Q1YY0
+M14xXwF+lVoILrvU0Tu7ihxS4cNLRqxn+T0IR/zIiqnzsna1SqTfvanZg3sRQUCz
+gKwWLF96uXCwdBMaXGXS9gDHWdbNjmaPCcpjeKvA/mleEMoSbpKMriPd1ekUfTu1
+KoMtXj7ywkdsXmOibpWE1dY=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem
index 98632f07b8e91..48e4edef18fc3 100644
--- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem
+++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem
@@ -1,50 +1,50 @@
-----BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j
-PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7
-cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81
-yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw
-EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM
-H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF
-AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+
-QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci
-6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw
-yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei
-NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P
-SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA==
+MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv
+c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52
+pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9
+Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM
+x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY
+UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF
+AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa
+REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5
+gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU
+iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE
+EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D
+tEOIfINSAtpVNmi5I16idsVixhy34A==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUZmU+DnjwZCJ0
-BIKtFkNd8mbcuL68hrOGbgwaG/psnaj5Jgd+7mM87nVPmXRMa+6gk+fPCr5GVblF
-bf/58IyXSC35rAMPOMoJ6SfTTDwpKTeuoYNhCTtxJW6g8lXIkn++jzpvBUwH1NjL
-E+LWUQF57JVprYW9vNL5JmrZz1zTztZVhGaP/zXIbO1QDCPZmiu7UQK6EcTxgx7I
-OqaLnZcHJSYjtjCvOzjFzelD4AjrYiyJQVsI2rAQ8eERwKATBpSVaKG9R87mUlIp
-5TKfRaVDwyadROcIIxgMMko+5uWy67s2CXrGYQwfe72JJtXqpZDCwLg8lzDk87fT
-JPlCQV7XAgMBAAECggEAS9I9w/hgndfxIJ9XkrhG1iupIpPkquIfBhcUxOFF9S4W
-2tIDjQFGdcLeJ8ss4/cHmVUTRCqen/cMC0foP+3qEjsXBnCsKuvn27/akqg9Nahc
-Ez/e6W3lRU/KfTPlAZOifIEep/EpIgaOWXhA2qbSMxcMg0wJWSEl4wEe1aAbkBQ4
-lZ+i/sj/vhceaDDBHTrsC3TcCm41eREHBFnRbr/ga3REAmKah0s48aeVp6cgDbaB
-4VLoT2KGhXlHFTQsUysM6W7xm35RRarbtiJfqH83ZQTepWNsrJ6du/eY/UfKnsfH
-fP/ppGtrktENRsmAesEjz+EwQPpsZLayxFdGZ6O2AQKBgQDtrC9CsfznaWdPtLey
-H02JXS+roPJWXRL6ND1u9qA2rg+/12XsqgIofz7sFU7Xm3334wVqwNIaLqm7tIxM
-8R3LzwPz5SJnNTWbILmckolZ+98r1AHlfwM/6zhj9krWE2vYlu16vI4wpP6taxb8
-c2wscUcMcTjPhfQGbF78VlW7AQKBgQDkx1AIvHLKE0S6Apexjjj3sXgsroo+Kssp
-aRinANuNHDPyLhVoKiVJsTs1soZ44Ls1YmSVRPMVoHQxxIKDnW9oui7aOOwoChhg
-qqt5N7gVF0fxcD5gaut9zY7LdDT70mC6nj2TYCjMxObVDTDkl2kHg77Ct5aQ6i+/
-SX2jjhZR1wKBgQCCTiO/IkxxJ3XP2lnYW5csF54aL+yNcwwDh36jBq5CLF5QJK0p
-+u+h+lmqH+iRBxwiOyJuK/dFCY5fbhZ53LwkUIRvRJH/GcohGekJscGCRzhNFDhR
-9bf3ZGQPUiongpBfOChNYXFntB45P/xjPnjxSM3WQYEPLfbjSqktGbx5AQKBgQCN
-wzvCzhNLSHlT0ftxmLGQOrY+6cDcGORfOrJ9+bgSTqGZthipvUPx0BLiiUdua8NV
-pyywmlMO2ahmlmBRciLxAD28F32uqeLFM7yhlndJCm4YG/drA5X9FH0hcVbLnGc3
-/IonYnSlvnVTG6NqYrBAadCxE9YG4jbID5/80zTdbQKBgB/30lNgWCXNKs/RVsMU
-npTVDESCwaU19wTlbOeVS3vCFDapJmuv7K7uhi1b67Pt5wnaw3drKNddIOS53XZp
-yfsYjZWml8z46qyA5kK4scQ1LkYPnj4Zf+q9dF9i1mYGT2b65v/dzfrLr7//hcBZ
-23zayhJpJWDJqbUWliaDmY98
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCet5b8bT4hofmX
+L1wjTTPC8cSzmNRz54XwH9haLsT9qsIhGA6GfC9z2kEOhWqbDH1xO0Qj8olbjfVX
+wtMG7jROgMG2skJFTJ0X50CejG/mfVOUIXS3/nanIYzlF/crmd8kaNKk0+fDfKiO
+lcq4Aoll1DGU8G2eyYI2Tz4K2RMTeb+mwsgZr/1GrcK3HTF90lCKJqNXwq/TxrcI
+NpZbZOPh1a6RQjosUV84pd7oW9zWx9M8bugf4YzH97u4FsJw/onnfrymiyAt11mJ
+9CO/1tCgbHQAHXlyX0/PIjo+VpXifDDUc3B3vJhTNY1V05DYs+RMRG7RbAbv+UQT
+ovnqnT3bAgMBAAECggEBAIql0g2c2KPEvSXmx1RHQqpPTQeewCin3YcQKo1NQeRe
+YMtMGpQ8s68+v4oR3jinxoU9wp6sZnVGI0aQ+IubVrIlJBOW0PF8pdaVuwtFnxL3
+9CT4vN9mOPxzAIO4VcEO37EMqMY4HsPUh+JW1Am+nYwIu29iQEBvw4Hx4z7StilR
+13pCl+GrZ+c/+64SRKDt5vlnPRElm30410nCRR37TvdRqmWqfrcph7mZ7RRQTtE6
+UPAU100eR+RT/U7lLb3xzxFOtUZ1dZXCMobM/lzDHXpKb7zrBwcg4ugo8GhOkjlN
+UpV+b/pvHNsViqpk3Rp0ZznIl2j/iLYtO+eZ1T45qnECgYEAy61VFS0xcGHLHgQE
+YQYrA6res/RTHWnstnZDW9KJ6yPFmKS8/Dh4WS30ORP0bNuCsn0UUPOp4C/njrPF
+JjcpohyqFZK/0OmW+oomEJWbH1Rdv9NaViVfjDcydeV1YI+bGKT2LCTjVOvk7oT+
+Qwvndzk5ToYBCBZNTs0MqjboOCUCgYEAx32AeFp48eMWlyZ93qVgvRYFhUQtHKd8
+ofKzMPJCJT27zAUg1arDN04S5LepyoioTr/rFIMlxjawl5t3VsJA8/oYvmn3xnKW
+WZxnMhd9kTz/VtZU2acOzCXCUKAJYQDITemshC7vzayK7m2+i65nF3ygySmbun1Q
+sStWmtxSvf8CgYA6gTKEX7PozR50I8FCQWxPpGBOHqtVMpz9GGFm2cYmFeNnYkKq
+zGm9bBbP4mx+bFtRe94/Fo7AR+Jiuj6yKHw8BbHRepLApqholRA0CVVCnZBETqiP
+QDL+6sgC14Ns1jM90/ymV345YX0kCm2iwqOdOoG8jX90XDuenAW8SwNPRQKBgCar
+jVvHLeq1uotSStP9+uKsgiVzMct8LAy4n0O9slTFwsGpvNOuZmkyGTd84sz/8KBT
+U56GTaw+rby1xkxrG2wv7tv6bq1pSPvV9asgIpck29pZcdk/KP8p0qEOZMMJA8sW
+oPp+nr2f1M6jN2ycQh+raP+VsTpVUG8Hl6ItU1zBAoGActnprdCobGUplY/Q1YY0
+M14xXwF+lVoILrvU0Tu7ihxS4cNLRqxn+T0IR/zIiqnzsna1SqTfvanZg3sRQUCz
+gKwWLF96uXCwdBMaXGXS9gDHWdbNjmaPCcpjeKvA/mleEMoSbpKMriPd1ekUfTu1
+KoMtXj7ywkdsXmOibpWE1dY=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1
index 7f99d4fec9534..6c794f664f171 100644
--- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1
@@ -1 +1 @@
-D8A8CAD72FF7EC2620AD14DC5E405AAD8D590A33
\ No newline at end of file
+C741148C2729C25172A40E45FF37C842DF99BA28
\ No newline at end of file
diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256
index 58154edba3fb6..880fda0c016d8 100644
--- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256
@@ -1 +1 @@
-216F69DCDF851F2A2FB2FF02E6979B74BFA1A6BE1506B7F9510397CF69CE5CDB
\ No newline at end of file
+9617E9454EB8B95594D2697E84753192FE832D88663FDBD9E436CF583D38F52F
\ No newline at end of file
diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem
index 676e23c72157a..53bffa3c40199 100644
--- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem
+++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem
@@ -1,44 +1,44 @@
-----BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j
-PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7
-cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81
-yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw
-EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM
-H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF
-AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+
-QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci
-6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw
-yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei
-NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P
-SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA==
+MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv
+c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52
+pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9
+Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM
+x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY
+UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF
+AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa
+REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5
+gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU
+iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE
+EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D
+tEOIfINSAtpVNmi5I16idsVixhy34A==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
-ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8
-5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R
-qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r
-5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw
-yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4
-1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku//
-XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK
-n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv
-5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui
-YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL
-+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj
+KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN
+shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5
+WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR
+H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp
+UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9
+LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH
+z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0
+ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX
+AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0
+4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk
+Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog=
-----END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1
index 7f99d4fec9534..6c794f664f171 100644
--- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1
@@ -1 +1 @@
-D8A8CAD72FF7EC2620AD14DC5E405AAD8D590A33
\ No newline at end of file
+C741148C2729C25172A40E45FF37C842DF99BA28
\ No newline at end of file
diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256
index 58154edba3fb6..880fda0c016d8 100644
--- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256
@@ -1 +1 @@
-216F69DCDF851F2A2FB2FF02E6979B74BFA1A6BE1506B7F9510397CF69CE5CDB
\ No newline at end of file
+9617E9454EB8B95594D2697E84753192FE832D88663FDBD9E436CF583D38F52F
\ No newline at end of file
diff --git a/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem b/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem
deleted file mode 100644
index 6baeb77224224..0000000000000
--- a/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem
+++ /dev/null
@@ -1,50 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
-SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0
-PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4
-kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve
-cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg
-92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH
-NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF
-AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8
-4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q
-yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4
-g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj
-J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm
-pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA==
------END CERTIFICATE-----
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCk/dRBOVlVgWPO
-9FEQlt2pgHDXMX41ZrJ34AVSpbOtbflMQLGeZHQ8Sv4gsS5w5+I8H8DBGhMIwXQg
-mTXLqXaWPEaz2+4YP8x0G2fHMndxVvM9O1WblDiQCZUnPs77ZzDvRcYPeGu4Mwoa
-QCLFHAtenXoQ2XSPInpXjgkFF85BxksbeB4mC95xunUrCRMBJpFxi/73e2v3Pmz3
-Zr/ZRVdG5Ir9F/X7H5Tj4ZFUmHj+TyL2c/+J5iD3Y4aqFavURN1K/rpJn6wd37g5
-+0xsROoF2JMaPsYz4SzMARRM9gNptO9CU4G11kc1g4scYzErF0SPkTLMTjLxY7vK
-Thb5Nki3AgMBAAECggEAASkb7h2GKFjRp+oGC/TTuFaD9K+PcLa5OKilwPATdHva
-jhPCbBfOzYHFidtVNUwcRkn+5BzX127s7zHEtBsMD4B7CtbYNOl1+bcbosYTGwP+
-kAaz0nVXdIPsvarub8xJBtXZz9AMCe6p+odK91H8Ln0zF50/+aXHcIg6PgPt2n6U
-smChi15o1F6kdr+hwrqUpjW7NDN3Fs5lCH4dNw8I5PvpqPwl3IkwYG8e76A/9dJa
-Fe1mzrUcmXi57JwSePE+Q7/ncIfXYB964AkTMLabylaPsB5EKP587jfpEfXXfyXn
-Y+MLFCfP8dUXwu2nAr6vSWs3Ne4TGwWLLKGSP1UQuQKBgQDRBrQj75aN4hPulr9j
-MTLIXxNRBOEkKXv11mvZFEV1ljyyw3qCivIndJBLNLRDsY+cr6yOYVwvfF5sx6Si
-sF4N789yusRQr3iARJ67+fIJ04gOaIMW8iYzB9kr9eaLdpWSbbBkVG44aF28CiDb
-dgeEFFjXYY5u4T+V+YJPLuDrLQKBgQDKEc6SXndtATpU8Gq5DWcUthPwEVQmVYsF
-6EGWtU/fdVgTs1XmkFuRLy4VZcICK8+uGqN+bOMtr5gKJjEhAr2aDMqpm3keXdLz
-Xlf/El2zzQ1Pj+Jm69odeCqGHwXGQTMOF5bqvIngWi1A5ijS/N3BiNLwtzlcKm+P
-yJuJF+dh8wKBgQC7Nd7XpLlaIFcrxLZrl9/c2FKLqOwgoEsW9tGnHmHLnCCHF089
-ZkbWEa8+vFiLnJd8hVbuOsL/AMvtb63DzGSg5N0O67nybgZmE4972rPuGxfrl615
-Oq393JSkq9utoyr5d+aZJYmGWetCBGxDQuYeZL7hQM35/yIdJ9iPJPRrjQKBgCac
-rndjm7h1kprmcc44lGjtvfOSrBzDHdScI+RTcxbFCnaBPznWfdjJRioKjr7xdjbT
-mkgvMF3rfsb5s0uWhXppVVSBg+xci1G7xl7UOJmB5jg8y0tVaBFXg/Cq/uR6UvIv
-acQjEMmREbKkCEsAzLMNnRkoOcq1xSmZcLcKnUknAoGBAJjGDcvr/RsrP/fehxxs
-jqtxALg26wAUbfvgjrJxZ3sSmXvP++t8KcXHoAi9vbUXwHjdcq1GCiynmuJG/D4z
-u7oBsQnducfSTULsmdMIjnBTy6cdcilfgfX+3h/eUEDzF2R0vx3ugmJMUW4+iMm8
-CVLNHOr0uNpdrz5tOf6SpRhd
------END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/ocsp_responder.crt b/jstests/libs/ocsp/ocsp_responder.crt
index cdc1d6414ee74..dd723c0983b94 100644
--- a/jstests/libs/ocsp/ocsp_responder.crt
+++ b/jstests/libs/ocsp/ocsp_responder.crt
@@ -1,21 +1,21 @@
-----BEGIN CERTIFICATE-----
-MIIDgzCCAmugAwIBAgIEG9rCUjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDgzCCAmugAwIBAgIEHKwtmTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTMwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNTTbCpwZg6MARSlau3FXBzmM4
-+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE486BXo1iIvdY
-kpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/KAvP0XebzfZ6
-kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEUK3vsdQ30LdDh
-F7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGvpmmTPZ/EGEgd
-07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPFSOOqV6iNAgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUE9xVxnPbyMQJIrAP924qbpY0
+BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5azyrxfrC93WfHi
+1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuwpu+7ffHc1gpu
+eQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgKkYkR6Wb7XBvq
+6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7LmiJDgEfiyXhcDM
+LaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4a7vEjrjvAgMB
AAGjLzAtMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1UdJQQMMAoGCCsGAQUF
-BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQCps3M1xiI8UZYXGd2ESdYBFB8Z8oK75SC0
-lgCB1u8ryjXPVcNPQSQfMDid+9uXHHQkdAZ0nbY/+gc9Iu3ekMS31bwROto4239s
-BlVH65hgDfOOcUlSbG68xgXZQtGHhx+bxJ+NMuPjkvEayJVI/jFj4/u1JBuKxXwf
-IHnUnjicgPK8d+txSEDlgaYEOt7udQc76Am2dVxejC3bCLK0WwvB/vVCWng1xiSu
-j2XsSJ24Xg8ppbEXP6hJ73UXOmFyrT9n2rtmDLLU0gNhiWhE3VPdB9FYzcteBknk
-9OR9cf45oRn//rGFwQIuK1KVQZWkn0j97fMHO4dDLLXwoJJZ0C5N
+BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQBOcnFH8KQjIqdROk4/zavs7UTmKfm24nAm
+CQFEbPcdDV15ySg8ZHxmzHw+SEzvIuhTnIacTnOWoBgr4sx6y/p+hd712SME0okK
+FJz/qWVj+u5N5zQ9IUqqJ1+dxq8x2rJbZLVnws1ToWP+Z4j1JuxZ9AWEfDmHke/J
+M61hGksZSf8VulPjuB8HJuLe3jaxOptQ7fncjkpVXSH91gzakjsS3lO/U9RrwC3V
+ZCX4JKWcjLadiKgf4L5II0GAWMZpVCXmYkUmkLLRkaFuN7t59FCOfomK21Uf9KoB
+Nvyz2RKMD2fY86gYtCBza85hM2tf0ul6JPIt9KtxJTFAwbRJP5Wj
-----END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/ocsp_responder.key b/jstests/libs/ocsp/ocsp_responder.key
index 07a72b35e8214..61e29babd4d8a 100644
--- a/jstests/libs/ocsp/ocsp_responder.key
+++ b/jstests/libs/ocsp/ocsp_responder.key
@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNTTbCpwZg6MAR
-Slau3FXBzmM4+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE
-486BXo1iIvdYkpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/
-KAvP0XebzfZ6kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEU
-K3vsdQ30LdDhF7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGv
-pmmTPZ/EGEgd07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPF
-SOOqV6iNAgMBAAECggEAKe1C6a3Cl7fwwZc07LECsnm7ub9qeYBTA76yyQLZswt9
-Y2zPj7vXIUNEVtbDbmnmN3Ov8QjeHK5k4UzEt5bfuNL7QJNvdK2drCPUL6oEmq5V
-rv9h6mBhvm5MWk7fMD2GyImY1nbcEmv3jexXvTFCBAej9pQpfx8agbVGG5a88WgF
-EXVt8emrAVBtIOS+lql7Swi+jjDvtKZS3HQDZoBqHhqguS/BT8EG7uwqM4unUeIU
-IgmWzmFYWiylUtGZwqkD5qtymXokxJHdOyUewz0US4sHP1Pnk5He2Iz3EJlb6uG9
-ffCL1DobPmh5Ptk6RdMQ1HHgfcQzni3uIgHqYalB4QKBgQDwaZCe5+OUpIFI3yL3
-SQDHqTtumxdFrWKeK1+coXJKXl0jM6OlbqDizYbsHGOlNeA0QLpP+IbO70s7naSA
-99k4djmBu5c/I8rkKBkxHLEXk05VvPLUYlyPbfMmIEZt3V6J9vMdvg2JcbHNY1mJ
-3hzKJpGdLsl/PupZO6dYyFsjwwKBgQDanN7Y3b3gW1AdCsdPpCbue7VJnRUowN21
-tMqmoMf+ODrlTaeUibh690xYynGky7uXGv/+7b2pJ87u3RNFHLVh0YQPKBEY1U85
-zKvJq7nVmyZRDUUlZxbotg3KLfujFK3GIES9zSNgSeYu4VE3N/QuDtasWcVM/DHT
-SiCd8RDNbwKBgDWAuM+0eaBl7+PAzO5Nj9/fWLunxkkaah0bN/8Kqv3D/GROJLg3
-Y5S1+rXnJ4Hn+IqIcS5E9/Nrq8mLrsWI4w2+udOWZPWHSKr4BYs5gGvnu4I3N69u
-aHz/EiLFAgeHQXfgvoA7AJY6+D/mQVH+XVfbt4JMnEk2Pj83mGmYxTezAoGAOo74
-tpi3Kujqzcy8VN4Au7MRfZWBPHhlKy4xdm7tA3DmfdPORuCZNP8XSVV5GY6lQjWu
-Swg93JjYOm2nosSs5XXs7O5rnf5NiYsEnTHNxqUXn3BxjDJQFafmi7jDhgyc+8xa
-/Yh8qGdDLF2YlrT7SHcbE/G8AwIvCoz5rbUMLisCgYEAg/du2NWZj1d9VjkZOuP3
-ZMnc04SJfB2oD1Xd/zoq/cEDFpHCAMzFm2BVPK3OAvPqNHppH1u0oeu17a+OCfeB
-+t/3bsNDd5Ovm8HNfAvhevQhG69tL9bfQblqSoBg6Fd00iILiT9WKK86zcV4kVMa
-2LXdfgZvnpV5oroqetzqASA=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUE9xVxnPbyMQJ
+IrAP924qbpY0BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5az
+yrxfrC93WfHi1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuw
+pu+7ffHc1gpueQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgK
+kYkR6Wb7XBvq6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7Lmi
+JDgEfiyXhcDMLaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4
+a7vEjrjvAgMBAAECggEBAJlMhe+m1SbhF/t2jIfuFj8r0v1k8nLOfjgX5JJcsA1k
+BOZznX0HiJ6ef5W8Gc5aX7qOyrKwMaldmqT5/nK111XRuYx6uEXJlmg0orqNHwwH
+FfgAjEjZkLdpUoJk1DQsFS6gZrcVoyLJ+esmsJH59zPnsICslzpzOhHz8iLS27N3
+zPuEhYi0yiXr4Yi8p/TloMuscTSmiWQy8CfIsFVs/FC4n40xkedvB4vEHUEfxb/D
+sbqPuKZ9nnu2EFCYZ+UpYOSho7q/60dBHpuszPpbLmk0ICm+2jda+32trI/ZxNxT
+ZopDwUlwfh6DoXz1Dj9O1Y54YPWcKlCeraJS/uSxobkCgYEA9LsRYDHtBDlxnOsl
+F2ALdIdvvuxAdJWg8gTaojbLnU18dujTtN3NvU4DYzl2KxKAuCHDNSpabWCd5CNp
+ouEx8CuA1GDA0J1aUXEDh+hBeqLatFeAVh5FNQAEKIbUa8H02K3CQ0pyY/XGC24b
+yVmW+P1mm0PeX9wn6QV5SSTyUFsCgYEA3dffC7NIJmJvmwxdc4a7JKL/OLfvvHEO
+sYjsKugqRFFALyTrR99/MmwWxvemoPCUhgGvYDUdgDZOtLK7aV0OTG3lb5FNQZyv
+odTMqgp1E1UTnjNDLvbe8BCwjpbzCr4a8Cu+tNHYKKTnTc+IQt3wJIvkODLpYXW/
+9enENerhHf0CgYBRjzpmNZ1IyCAd7JvK435N0okiNrfwYqFlpsp6PwqynM07D7Lm
+Vvkm+qXUXqUMC7mMI+xbfobuZsGX81lUJ/qtkMH5hVkc7SPZenFzzKptxXJfob2q
+daIY3K2GGBEY7GW+BIJ2TIw81f6vdn2aJ9pRlpmoawDkL4m5ddKK1t5OewKBgFHm
+d3b3WkSphLCGMR4MeJy+/GBxL7WvO5B5j08L5UbRhFOjWqVvMqNn2kTcewzhpMOQ
+bbGkCl8iLY5yidQNQdEfpVu6Ff6KHSEu0dxY/f4EI7j9DemRxJ52WuhZfCLUKlfO
+nppc+mOO9cH5Q0IEc7raVogU66pKcXMVOZX6P9/1AoGBAK3OS5YXFwwLoZ0Tqs4o
+p4wr1lcJnZT6PC8XV4s7G6xToQDNg3HzUIZ/zlAGQ8L5/69sJd4RePLuD8ULqQwW
+nYpTnY97k4fMzfm6Et19QDZt5D1eSjdyedyLeGLGJcB3PSvX6okUVuCeLTQt2JZU
+1h1KfS3ZnTJiOOI6rCnDU4Nu
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/ocsp_responder.pem b/jstests/libs/ocsp/ocsp_responder.pem
index ec54da767d2ee..e0ad37f96c70e 100644
--- a/jstests/libs/ocsp/ocsp_responder.pem
+++ b/jstests/libs/ocsp/ocsp_responder.pem
@@ -1,49 +1,49 @@
-----BEGIN CERTIFICATE-----
-MIIDgzCCAmugAwIBAgIEG9rCUjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDgzCCAmugAwIBAgIEHKwtmTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTMwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNTTbCpwZg6MARSlau3FXBzmM4
-+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE486BXo1iIvdY
-kpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/KAvP0XebzfZ6
-kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEUK3vsdQ30LdDh
-F7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGvpmmTPZ/EGEgd
-07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPFSOOqV6iNAgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUE9xVxnPbyMQJIrAP924qbpY0
+BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5azyrxfrC93WfHi
+1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuwpu+7ffHc1gpu
+eQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgKkYkR6Wb7XBvq
+6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7LmiJDgEfiyXhcDM
+LaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4a7vEjrjvAgMB
AAGjLzAtMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1UdJQQMMAoGCCsGAQUF
-BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQCps3M1xiI8UZYXGd2ESdYBFB8Z8oK75SC0
-lgCB1u8ryjXPVcNPQSQfMDid+9uXHHQkdAZ0nbY/+gc9Iu3ekMS31bwROto4239s
-BlVH65hgDfOOcUlSbG68xgXZQtGHhx+bxJ+NMuPjkvEayJVI/jFj4/u1JBuKxXwf
-IHnUnjicgPK8d+txSEDlgaYEOt7udQc76Am2dVxejC3bCLK0WwvB/vVCWng1xiSu
-j2XsSJ24Xg8ppbEXP6hJ73UXOmFyrT9n2rtmDLLU0gNhiWhE3VPdB9FYzcteBknk
-9OR9cf45oRn//rGFwQIuK1KVQZWkn0j97fMHO4dDLLXwoJJZ0C5N
+BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQBOcnFH8KQjIqdROk4/zavs7UTmKfm24nAm
+CQFEbPcdDV15ySg8ZHxmzHw+SEzvIuhTnIacTnOWoBgr4sx6y/p+hd712SME0okK
+FJz/qWVj+u5N5zQ9IUqqJ1+dxq8x2rJbZLVnws1ToWP+Z4j1JuxZ9AWEfDmHke/J
+M61hGksZSf8VulPjuB8HJuLe3jaxOptQ7fncjkpVXSH91gzakjsS3lO/U9RrwC3V
+ZCX4JKWcjLadiKgf4L5II0GAWMZpVCXmYkUmkLLRkaFuN7t59FCOfomK21Uf9KoB
+Nvyz2RKMD2fY86gYtCBza85hM2tf0ul6JPIt9KtxJTFAwbRJP5Wj
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNTTbCpwZg6MAR
-Slau3FXBzmM4+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE
-486BXo1iIvdYkpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/
-KAvP0XebzfZ6kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEU
-K3vsdQ30LdDhF7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGv
-pmmTPZ/EGEgd07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPF
-SOOqV6iNAgMBAAECggEAKe1C6a3Cl7fwwZc07LECsnm7ub9qeYBTA76yyQLZswt9
-Y2zPj7vXIUNEVtbDbmnmN3Ov8QjeHK5k4UzEt5bfuNL7QJNvdK2drCPUL6oEmq5V
-rv9h6mBhvm5MWk7fMD2GyImY1nbcEmv3jexXvTFCBAej9pQpfx8agbVGG5a88WgF
-EXVt8emrAVBtIOS+lql7Swi+jjDvtKZS3HQDZoBqHhqguS/BT8EG7uwqM4unUeIU
-IgmWzmFYWiylUtGZwqkD5qtymXokxJHdOyUewz0US4sHP1Pnk5He2Iz3EJlb6uG9
-ffCL1DobPmh5Ptk6RdMQ1HHgfcQzni3uIgHqYalB4QKBgQDwaZCe5+OUpIFI3yL3
-SQDHqTtumxdFrWKeK1+coXJKXl0jM6OlbqDizYbsHGOlNeA0QLpP+IbO70s7naSA
-99k4djmBu5c/I8rkKBkxHLEXk05VvPLUYlyPbfMmIEZt3V6J9vMdvg2JcbHNY1mJ
-3hzKJpGdLsl/PupZO6dYyFsjwwKBgQDanN7Y3b3gW1AdCsdPpCbue7VJnRUowN21
-tMqmoMf+ODrlTaeUibh690xYynGky7uXGv/+7b2pJ87u3RNFHLVh0YQPKBEY1U85
-zKvJq7nVmyZRDUUlZxbotg3KLfujFK3GIES9zSNgSeYu4VE3N/QuDtasWcVM/DHT
-SiCd8RDNbwKBgDWAuM+0eaBl7+PAzO5Nj9/fWLunxkkaah0bN/8Kqv3D/GROJLg3
-Y5S1+rXnJ4Hn+IqIcS5E9/Nrq8mLrsWI4w2+udOWZPWHSKr4BYs5gGvnu4I3N69u
-aHz/EiLFAgeHQXfgvoA7AJY6+D/mQVH+XVfbt4JMnEk2Pj83mGmYxTezAoGAOo74
-tpi3Kujqzcy8VN4Au7MRfZWBPHhlKy4xdm7tA3DmfdPORuCZNP8XSVV5GY6lQjWu
-Swg93JjYOm2nosSs5XXs7O5rnf5NiYsEnTHNxqUXn3BxjDJQFafmi7jDhgyc+8xa
-/Yh8qGdDLF2YlrT7SHcbE/G8AwIvCoz5rbUMLisCgYEAg/du2NWZj1d9VjkZOuP3
-ZMnc04SJfB2oD1Xd/zoq/cEDFpHCAMzFm2BVPK3OAvPqNHppH1u0oeu17a+OCfeB
-+t/3bsNDd5Ovm8HNfAvhevQhG69tL9bfQblqSoBg6Fd00iILiT9WKK86zcV4kVMa
-2LXdfgZvnpV5oroqetzqASA=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUE9xVxnPbyMQJ
+IrAP924qbpY0BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5az
+yrxfrC93WfHi1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuw
+pu+7ffHc1gpueQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgK
+kYkR6Wb7XBvq6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7Lmi
+JDgEfiyXhcDMLaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4
+a7vEjrjvAgMBAAECggEBAJlMhe+m1SbhF/t2jIfuFj8r0v1k8nLOfjgX5JJcsA1k
+BOZznX0HiJ6ef5W8Gc5aX7qOyrKwMaldmqT5/nK111XRuYx6uEXJlmg0orqNHwwH
+FfgAjEjZkLdpUoJk1DQsFS6gZrcVoyLJ+esmsJH59zPnsICslzpzOhHz8iLS27N3
+zPuEhYi0yiXr4Yi8p/TloMuscTSmiWQy8CfIsFVs/FC4n40xkedvB4vEHUEfxb/D
+sbqPuKZ9nnu2EFCYZ+UpYOSho7q/60dBHpuszPpbLmk0ICm+2jda+32trI/ZxNxT
+ZopDwUlwfh6DoXz1Dj9O1Y54YPWcKlCeraJS/uSxobkCgYEA9LsRYDHtBDlxnOsl
+F2ALdIdvvuxAdJWg8gTaojbLnU18dujTtN3NvU4DYzl2KxKAuCHDNSpabWCd5CNp
+ouEx8CuA1GDA0J1aUXEDh+hBeqLatFeAVh5FNQAEKIbUa8H02K3CQ0pyY/XGC24b
+yVmW+P1mm0PeX9wn6QV5SSTyUFsCgYEA3dffC7NIJmJvmwxdc4a7JKL/OLfvvHEO
+sYjsKugqRFFALyTrR99/MmwWxvemoPCUhgGvYDUdgDZOtLK7aV0OTG3lb5FNQZyv
+odTMqgp1E1UTnjNDLvbe8BCwjpbzCr4a8Cu+tNHYKKTnTc+IQt3wJIvkODLpYXW/
+9enENerhHf0CgYBRjzpmNZ1IyCAd7JvK435N0okiNrfwYqFlpsp6PwqynM07D7Lm
+Vvkm+qXUXqUMC7mMI+xbfobuZsGX81lUJ/qtkMH5hVkc7SPZenFzzKptxXJfob2q
+daIY3K2GGBEY7GW+BIJ2TIw81f6vdn2aJ9pRlpmoawDkL4m5ddKK1t5OewKBgFHm
+d3b3WkSphLCGMR4MeJy+/GBxL7WvO5B5j08L5UbRhFOjWqVvMqNn2kTcewzhpMOQ
+bbGkCl8iLY5yidQNQdEfpVu6Ff6KHSEu0dxY/f4EI7j9DemRxJ52WuhZfCLUKlfO
+nppc+mOO9cH5Q0IEc7raVogU66pKcXMVOZX6P9/1AoGBAK3OS5YXFwwLoZ0Tqs4o
+p4wr1lcJnZT6PC8XV4s7G6xToQDNg3HzUIZ/zlAGQ8L5/69sJd4RePLuD8ULqQwW
+nYpTnY97k4fMzfm6Et19QDZt5D1eSjdyedyLeGLGJcB3PSvX6okUVuCeLTQt2JZU
+1h1KfS3ZnTJiOOI6rCnDU4Nu
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1 b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1
index 9bff82f75c9d2..bd7ebfe3ea8ec 100644
--- a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1
+++ b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1
@@ -1 +1 @@
-8D7FAD707F23480F7BE38FC06F46794DEDB18CEA
\ No newline at end of file
+EE2B7FA024D58EA95D40EF923938FA34B8A6E9F8
\ No newline at end of file
diff --git a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256 b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256
index 8251d950e2f35..350edac368b49 100644
--- a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256
+++ b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256
@@ -1 +1 @@
-CE89D643467D21E63407A1C64C793FA72D5C5FCF32A97C1E317EFC5A367B9F4D
\ No newline at end of file
+1A354365C8D9618F76783975F8FC0F30BD097AFDBD7A15316D8BC2F2CE545A67
\ No newline at end of file
diff --git a/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem b/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem
deleted file mode 100644
index e3065485238bf..0000000000000
--- a/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem
+++ /dev/null
@@ -1,48 +0,0 @@
-
------BEGIN CERTIFICATE-----
-MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt
-ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1
-MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
-TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs
-MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg
-rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5
-lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm
-kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E
-v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp
-3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv
-MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB
-BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB
-BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh
-dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC
-AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu
-bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG
-FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ
-5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV
-yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi
-hG10rdLfnQWn2vpYKU5b3Vo1yg==
------END CERTIFICATE-----
------BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
-dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
-SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0
-PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4
-kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve
-cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg
-92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH
-NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF
-AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8
-4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q
-yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4
-g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj
-J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm
-pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA==
------END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem
index 93ac0f42be110..e8fbc703889c2 100644
--- a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem
+++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem
@@ -1,75 +1,75 @@
-----BEGIN CERTIFICATE-----
-MIIELzCCAxegAwIBAgIEUOothTANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
+MIIELzCCAxegAwIBAgIEfW+T1TANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt
-ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0
-N1owgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
+ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0
+NlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs
MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz46Uuj2QZUYHdpn1Hqcus0Bp+G+DXB4z
-+Oy0gfsDcVjb3zJIuTr4Oa50kZs1ZxzAAi0EuZbhmaxb1UIoF1pWKfGrFfP6xRna
-X39unrP5iZ8w7M6f6Op6LsygKMe4Tx2IdtC69rNfwxlaeFhCpIzG6gzNQWD/xFVp
-pLpaqqPHaIIgp3KSv4qLDwBT7gFcCHUuj/O71V5wkipeEn+512ly98bHn8whq3fk
-w7tKTmvy1kMxRxB/Bc3ZbhRJJsXZkcRv4M+qrdDkz3/+IIUNkMTs+6fPeVsDVVEF
-wfNIbQMPznUyfppmdahghwSpuHiAzzFAR1QZI/6SGc+E6VZn8nEalQIDAQABo4Gv
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+W6RJhUIsx3hsjCKQNfqacGiMPTz2NE0
+YKoqYrKM5w4xCgwR5lRkOW9kC9ASsm232iQrffGD8l5huymc1RXog2sEh4JV/5f3
+VaNq6Lk6x9spn7roaQ8LbyeFsXEH32r3PM0YRrNs7tSWN2uaUqXF07OvhN1o5/dx
+28IVAyhLC06F3gqWEDCgn0bd6EpPMtSdKfnvJ4YrNRFrB3MtAJ6VDBHgjMVbXrrl
+8GVmiGRA/y4dDbuT5yK3+gmu7XrJhUKIMGbUKYuRR1NiH+yNUaaAlkoyrVE3gV+I
+Wwz1e+sl/3oc+fHsmXtX9kX+NxhSfR0wm4Tn//CjTQxGY/uu/FQuqwIDAQABo4Gv
MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB
-BggrBgEFBQcDAjAdBgNVHQ4EFgQUTwxQ4b2GGlnEqB9M2bAXp5OirKMwOAYIKwYB
+BggrBgEFBQcDAjAdBgNVHQ4EFgQUbcLZ6+ThLq8kjSDkl4rjudqckFYwOAYIKwYB
BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh
dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC
-AQEAIQc2TM7zkixdANoFswyN2Q32xlQPeXGOWOJneTepeuqBtVwQ0+aZ6wCE91b1
-okTQDizDPFWUROWdffpGk/EI4+Q+J/HONOiq3J0Rz9Dak/VhApZ50UJ9jwZttra9
-SGFw4xQz7AG6ftYKT/78GA5KTubQU/gKZBopwNx1VJi6aFnwSosbofMviJQOcVlX
-Ga+I2LgvB73PPtxUbk0LIHV8k+wY3DgseYyViqOxFlrbc4/stqYHE5y23I+539AX
-8x1Sc2WtO198gOAXobLiYkW6mUqSPVENMTOVa2hC5rsaigYMi1HbRQa57s4+5UGr
-3i8BejcVaI7mfbce3lCoUbAgGg==
+AQEATpC8AxX4/QQ3JuqD7+qDMKVCY6hhOIETDp7MJvRAF9I/8P7p4AWXDQuROaXp
+bGKm7Q/29UaHvF23YfMl10QCc2cb9ybYHqzbKtGKqIAjnCtsFjI51Hs3rb34RGIt
+/DT6IqYqix+aR5MyVA1EmuA2eV8WM51vjcvlOPhrz4dYzecEgB5Q7yJ/pT8++8vK
+huhAq8wwMo4pMsrtzq5AVGV6ND43gB7Hn2LHD4RNIDfUbvSG32vBJHm5MJ+WiR9l
+Ljhdyc8UGZTzG9MQ9KVXhNuPF8m0aGA/5euuV03JJBODpwftS7QkdbUCvgl0EK3i
+D8PVv2KR4n72yXTvgVXNIGFG7Q==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDPjpS6PZBlRgd2
-mfUepy6zQGn4b4NcHjP47LSB+wNxWNvfMki5Ovg5rnSRmzVnHMACLQS5luGZrFvV
-QigXWlYp8asV8/rFGdpff26es/mJnzDszp/o6nouzKAox7hPHYh20Lr2s1/DGVp4
-WEKkjMbqDM1BYP/EVWmkulqqo8dogiCncpK/iosPAFPuAVwIdS6P87vVXnCSKl4S
-f7nXaXL3xsefzCGrd+TDu0pOa/LWQzFHEH8FzdluFEkmxdmRxG/gz6qt0OTPf/4g
-hQ2QxOz7p895WwNVUQXB80htAw/OdTJ+mmZ1qGCHBKm4eIDPMUBHVBkj/pIZz4Tp
-VmfycRqVAgMBAAECggEAaaLMqVkqA5E3vTIwYjB+gTa4eHvw5FJu6zM1z628m9S4
-2wkRoEyWB2114KGY9WjZhIH7FKXpHjht6MUp+HC+x8w64gpxyB0XWmFWIKnCyDtg
-QDYNhxLHqsf0f6zebk2+pZIoheTXSUm/FN7+1BbeKkLnuG2w3vEFupQqDw0aMWKV
-z0bX5oXeFqm1piCqj0ng5eFRDFD9JSXvcrGxJ2sxT8CHgEQM+uDIYRsU8K21Wq2W
-I+MuyMz9La1sfjHITGQ5MdunHSYvjNuHCXqM3YQWR0Bh3h1DNNCsMTh5HZeRWVsd
-Haof4VjRScwoW1q27T/2mnohf4sNu0cGhRYlfwtPJQKBgQD7QeE4pw4R0HjYlqb+
-FBLlbTdKdkm9eUg7heOV/KOwiungE9j6pwxuYosC0McVYWWG9SMKY0q8/UsPVZIm
-CzZ0Vs1gB9CG8UW999YkqvKM0YvetOpNmaW2gc0fY/TPgMqQZU8l7VdUudiXB8hv
-aDYPdYd27/B/1Lz+he12XfDUOwKBgQDTeYh8Zm93qrPPbg2ieESY6V8VpnFdOG9I
-EUtgiElXiSlyB0pqV8+DC1UWmFsRz/3i62rp8o58UcJPMe2SJz///W9C+ZZyNtW/
-Rbx/40I5pBkvK0bZEXgULUVnFlvgb3a3PsLyhxk8eL5dfp2NvtKTXvfGaKus9QYb
-vEoEPobvbwKBgFMDiDskjrR6EYNV+ySVU0z0EcGLZX+xk5j++puylg6dRvpe9GCU
-Uroh2tX6TtyUimvVkFc9SRM6CNOvLRNevwYfK8nfqxj6nFVQjjMdO/gkv7a8RXGQ
-Iz0yk6gcaWUpo0OkBUt9qE34/UOhMasFXl8rMK+uROKnUi4x56wlC43DAoGAf4RD
-NrAV+tSmBChadGONCZ9/RHDO3uVOxOgYyaakgvIkWavnxWQZru1Aa5WHJKCEeTZ3
-i4ZFQNWUE2kJ1h1wzA2n73zMqSZDkUidt4fzwQogXX79A5szCweZV+X1lMnhjfF8
-X/3yy7ILKBlXK8eq9k5Hng4zpuFAw6yv/QsvFmUCgYBPAceHxnjW9D/NWIstCQmf
-fxWiJyYpSfo3T5snWSKbUpZAFBu0RXLRiwBUteY7/1aZCPAsr5SM1tCSRKgiZbGp
-OJXlyTv5IJMDvCA233EzuUVxhc8xxXMYQCspOd9E7Wf1+PMAf211mijeOt4sXvHv
-4wQJwTDVo0ahgYMHJlHi0A==
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD5bpEmFQizHeGy
+MIpA1+ppwaIw9PPY0TRgqipisoznDjEKDBHmVGQ5b2QL0BKybbfaJCt98YPyXmG7
+KZzVFeiDawSHglX/l/dVo2rouTrH2ymfuuhpDwtvJ4WxcQffavc8zRhGs2zu1JY3
+a5pSpcXTs6+E3Wjn93HbwhUDKEsLToXeCpYQMKCfRt3oSk8y1J0p+e8nhis1EWsH
+cy0AnpUMEeCMxVteuuXwZWaIZED/Lh0Nu5PnIrf6Ca7tesmFQogwZtQpi5FHU2If
+7I1RpoCWSjKtUTeBX4hbDPV76yX/ehz58eyZe1f2Rf43GFJ9HTCbhOf/8KNNDEZj
++678VC6rAgMBAAECggEAao1M/BOoL7voGhKaPLD/tkW9X2SEdm6IDXMjwB2+C0YI
+tN4LF3WdituGxXURR5+PFmS1H4v4baTb4vQXxv8g4GLrAGgxDIqCYdb5aIkYDyAU
+W+OgPKDspYMgnXhHgK1VCGgkoq8rLasqsGoK9ptSMuljZUKf+de+j74M89hWlnEQ
+4JSy3C3xvHZZn/LYT50N+moiw7dh9a6ceDK2AoU8ZJq0iSL8My2bs4uVhurKPKFP
+vrbKerhRP/sh2h0LosnB+2Wp9zwSqV5L5hn+W2WVqfPxgWtFwrnzX+RYuvsAN3bn
+rAHZ4HzOfEs+dDNYySJ1UZXFR3CuXD7qa+ApbNFAgQKBgQD+LttPGhSzAYh46lGG
+iOAngpGQBCKaOrnhxsK1VwPZMWACV5Q4sfLdrR+mAroLjSqFrVko+qe44hqmngVL
+OfSirk5B12kObtSrjvKn5gR2WyASl5P0QNQl8TolhWI3gEQJH0k2jN5X0l24cJ2N
+HbDpzU0DObiNfB16tyYQHjo5mwKBgQD7NwQV1LUOn3ZIZ5nUFwOWd9IH4s++pwPd
+m/39YCD65UYQ8OhfTJrVcn/cz0ETsL9DP/2sthoa7PPhYaXLi6qcfZmsuJE/dVOv
++pecSn672gCURsgPcPl/XX/siRmZgb45SDEyrl4GYoK/SbyZlgnvpJo42lraaD0e
+UBbJzLP4MQKBgCUV/RWLfp2niZvqxD5W1i2tlUNn9wx9qQVSFLKUoZEc7R8qXAvx
+mrfRPJ7iIvbwf9XFNw8Nk08cXjsVLzyMli7uM6jTnxZmAU2Oq8TngJssLH/J8eJf
+WxS2H3+9+FiUtFiIYgw9fWte3CG+/J3MSTWzqJrh3xV1mG+BLWKIpoIlAoGAH9So
+v9tj1aZ+5k26QBVqbvZftoAWsqGW682IMUKs6x2B80OTLgAW3lTTvrbEGCqdEXha
+PDgWtrKvdC9bQp8/zvRbNHducAv9vp6R90u5IzRMPn15e/tkoa7HNsFobPrzj26G
+TVWqtERnLLW7H/rS53qD0BBa0rHCjMS5HnjBlQECgYEA9XUgBaG+4sC4+PPLeFuF
+0NvxdtVFBU4/x9B4eGWLNuvmekwF5rnU/FjMBBeUW+W05VGsaFxjGUnk+DowZjyL
+MaLGMY7k2AxoeCB+mZ3HbtKDfi2W/AcLmH8Vz3DpqlmiFZTIYdbyeP+Pnbg88Ckc
++z1mB2pKj2JKsaptU5anL2Q=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY
SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j
-PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7
-cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81
-yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw
-EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM
-H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
-AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF
-AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+
-QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci
-6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw
-yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei
-NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P
-SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA==
+MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv
+c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52
+pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9
+Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM
+x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY
+UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF
+AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa
+REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5
+gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU
+iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE
+EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D
+tEOIfINSAtpVNmi5I16idsVixhy34A==
-----END CERTIFICATE-----
diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1
index 0ea8b53ffbad6..0e6a68a74ddc3 100644
--- a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1
@@ -1 +1 @@
-A1F079E1453B9E8B79FBD4A7A7EE430851A0DF3A
\ No newline at end of file
+9B5ECB2AB3DA8633F3ADE0592FA7FC3C1D7B1A32
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256
index 1d29ebcc2e439..7e7f3a8d3d4db 100644
--- a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256
@@ -1 +1 @@
-C4A40BF1602DACA5BD94BEEC95996961422C70F749F709F3D5E776E2A6CA96E3
\ No newline at end of file
+D640AD852DC92F4DC209C4DB27E8103FD771986C13C1250C19578BD34858E8D1
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem b/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem
deleted file mode 100644
index 3aa49df8d1b55..0000000000000
--- a/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem
+++ /dev/null
@@ -1,53 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt
-ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1
-MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
-TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs
-MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg
-rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5
-lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm
-kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E
-v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp
-3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv
-MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB
-BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB
-BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh
-dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC
-AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu
-bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG
-FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ
-5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV
-yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi
-hG10rdLfnQWn2vpYKU5b3Vo1yg==
------END CERTIFICATE-----
------BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDyjubtCkgxRnav
-oc6aG6o0XbnHSh1v0GCtIubse9vdYG7qPBY+uuc96tH+c4NURjhCjuY5yWAKyign
-Hcdo9g+BFh99FAwn4jmWm+xGwGNbsKdLRqbP1BxsaciEx4VTjmAKfnUJMs+gEnuE
-H0XhnTZs4kQySReSG2aSSJXKxSzqmj63b7Dy2iM5Jxl14NS3B9K0ontdv0Hv0qjc
-EJwhNclTEMFypQQkX4S/dhC5CD9Ao1M74vf1oUWjxY1+4/T23F2NP/5Pw9mrzbpx
-yiFF1KsnmWrWFeaYpenenpS3floTi2OPopKMOicyRhy900dDRovanzHgmPCDAC43
-BrBNRtgjAgMBAAECggEBAPIQ4y0U4c8rPy8wD/uEOGxiTREySgZYsuKWvlarlVRs
-9MQWiyy3YidMvZ1uslXcbjEeY2ywJ4UdEs1WzrdVOUveRDaTVz5Gaqp/mWFShtXu
-ikZ5j+hBCsy3FUJNzCUDJZ3TbgFsEADz8Qh+HUN3neU0OlLk1v0dE1RR1Au0k4rb
-yvMFRDcHLQ2u6AoZm1vkaV+/E8REObb5lutgs2719JJapAlbPT49ttlkfXvgt5kv
-Bnvt80S5+PEuyLNVRsdsRLaZZ4tZpmYenObb4kjiIbCHGRBkHXwXdsnLuqmxXSMb
-52cUsBFGaPUtvIUQh41kGSUNdnKjf1SndJKqE4m6nUECgYEA+gxQx8SGMuy7jEqD
-A/qU+aFF8brqeCb29YifY1eMjox+PvC4+2kG06Y3C/dvbA7eRxdU1PN5R/nPZMrX
-+WxNbsnSJGtvvxZplygpj9DNzwKCH+4Z9dLk/+f7HqKv55c0eLt22PjnQ9GwVNEG
-UnEWDo6Wl5F6qw2HAdRGuQbvBjsCgYEA+FTyQuxOgWpjCw9FrtV3+nRqISGaKZMM
-pqvzPQQuA7Xer2UR4aW2lGtaA8y8Xgt2rBAPIlMggCIUXmWdkH6pwHSvIWhzCMhx
-cyFTAFFsFcQkhCIArVbGvhbBgR0Srtb8ncFx+qbqg1N4Uwm60trBQWgAapZpFhDi
-hXqRmSoDDzkCgYEAlGE+hmz+XbXRTVziBjhqsv+aq+mJPaeRoP5j5uWLCQQh3mOm
-wbn/TRUzUSyRuAPSr0kPFBcu/yEkiuE77EzyXi3xP59pfnFkU0iH8Ums94y7fwsh
-6JgvQBR/FhzgWYOGpaZIzlRVmA8UniAzqjRlLFo8ztCLhHnQhatcFGwi5wUCgYEA
-047KtOjMGMShjBJ+sut5Qw1aPM97nl+AL53douWkrdSK2bGpAitC2D58eTA6aYQq
-nXsw6XUYAxEFeUXobej6hNLjP/rTxW+99u803th+1Cw9T7QID6QVvGt2fqBeAkV1
-AJCEoZ0BvM+nelaXqnpimW4YrLVm4T2RPVWmJG3+HUECgYB+q+DztAUDCiVgVtxR
-wkwnl8WPgZI01b+bCP3d9HgL6zLt/AOYBDfsKuhQ23CPhNJvVmq3gi9xvufBM6jA
-lWhttgN+G72VmQmA84yXgi7b3T73E8ft0u0thJjPaddzAJOuLyYKzLI0KgOYe4Hk
-Glm8Afrwqqz3QQPj1mqrK5Rvlg==
------END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/server_ocsp.pem b/jstests/libs/ocsp/server_ocsp.pem
index 86d73b9cb8ee4..9cb0ac85d3e51 100644
--- a/jstests/libs/ocsp/server_ocsp.pem
+++ b/jstests/libs/ocsp/server_ocsp.pem
@@ -1,52 +1,52 @@
-----BEGIN CERTIFICATE-----
-MIIEBDCCAuygAwIBAgIEKx0QGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEBDCCAuygAwIBAgIEGnfTOTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7kdmu54NATVQJ17vNVuL5M+t4
-iExQWHSVi8lp771E+4z1hJqnQHH1Voi2HuWS/nolxvap/O0V6GbDmWbdFRoBL6cP
-ASgrZPn263SLfgms31sIYMUthPmPK3UpxyQByFPpV+4oBDCnRRos1+XOKvAhCd+3
-GctudX/OiPRTU0iShURYyKytF0LCxNAhXqxDB8UNpSakM8OOwz5QOVP+0Px+HUT5
-0ULHueof6GHJXoACeQlXsMWdLcXsyj7Gl+ogpBAAbOZyNzVHSJzjGW5krTvwEsZ6
-9lb22kfYi03BsTxueQmC6uGFJWBxwnkfstQ+2u36hP+IKwJMjMG5u+edAUQ1AgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCul4pkcbY3hc98DR2yj8+RubSK
+sajO47va1vOEy4v3HVo7/kFQg6GRWCTXWzb8uzsqq81a/5dGbqqrWjzwWKH4oyRr
+G7J9WDCukiWDqC0v1TkGgvdFYtNSJrmW4sOtsFLBteMbuaCpYBQ3DNpRjUazJTx0
+tZu5bb+MKAVHKNIlptXfZ1CGg8iQTb2e3iBoYFSknF/XdP8GUXU3PtdUpmx5yuwT
+BaFTExfdDj8uQuxz1xT1crYb/+akxMO6aqW6EFNFEvkFJWxu5GGnHqKBfgfsvKGr
+D4pA/W4kWwVo0wfQ6JdQdMjdOITDLLWUU5w/ecF2oYbSyfEw29n2ZzyMhIIzAgMB
AAGjga8wgawwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
-BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQqVWQYrG9+vpv1nnGGws3T6mqQqDA4
+BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRGjjyMczVRJuyPkj517L6To2Mk3TA4
BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw
MC9zdGF0dXMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
-CwUAA4IBAQB5+mPWOi1CCc9TT+TDb5A07U+QzkPqVfQc2xME4qVozj07B7BeD5ep
-i7g6uxAT4jLutve/k8M2cpq13oFyMC1C2e66Zw+8qKdPNXVbdyJhDnz6hGLZPE7W
-QwTB9FMmZOKB8012ZnB2gTcWqcsG2rcglex4iZtYiVxA7KzrCaYqgfX34clFfc9/
-qOIGqBiMp9ic/Z5C2irdY+t2vYs4gW12nrsIYJFbg6pjy2e7oQoKQ7b2cxzAYZvJ
-vISodMjrsxj68A9XlXQ2lLcXwiEd3czSAx3sDd2dieZ9JcNybPXpaC+RF2GpPd6F
-3Eu4OzInD6UWtHSJgvmriF2rpqV1LTXM
+CwUAA4IBAQCGaXzfjXEei5VeWf8DeTPnys+jda5sRTlIisoPpemrUH1tT0hNed0r
+jhCWUmju1rpoxvfvbesdp7vAeQnUkZUJgnYnLhrzXG5u/CS9CGplbxtwnaXpe2Wb
+zEPFBtIrKH/ssw99zP4MiGfQiqP6yGKF0q+1q4DgQs7d5ZEQzpQVLDEccko89JTe
+T1Ts0gZHB1tBcR5IHKJ8it320loYAkahMLythT2dRZ0+cLWiRjNnm9HIVAuPpEOl
+ifj7ndH696aMQp97URqRdwR6vN3rFS6mPJhEzKohNx4C4ET9Dm6juvuSAmRx1nkm
+PF2TLpBz0dCO9BB8iCY6mxmWmSHazU3W
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC7kdmu54NATVQJ
-17vNVuL5M+t4iExQWHSVi8lp771E+4z1hJqnQHH1Voi2HuWS/nolxvap/O0V6GbD
-mWbdFRoBL6cPASgrZPn263SLfgms31sIYMUthPmPK3UpxyQByFPpV+4oBDCnRRos
-1+XOKvAhCd+3GctudX/OiPRTU0iShURYyKytF0LCxNAhXqxDB8UNpSakM8OOwz5Q
-OVP+0Px+HUT50ULHueof6GHJXoACeQlXsMWdLcXsyj7Gl+ogpBAAbOZyNzVHSJzj
-GW5krTvwEsZ69lb22kfYi03BsTxueQmC6uGFJWBxwnkfstQ+2u36hP+IKwJMjMG5
-u+edAUQ1AgMBAAECggEBALf2q26s6ADoFtV0fmA9qG1L69WSsG5Y4zdNG6VIGfUP
-VsfyX5BMV7iNP5aHpRhPeFOW2ZQNWiaTgj6zNLz/Fjs1ln7T3qb46WSwt+ScIDcp
-9Wm5J4qmkfFGaSopg3owFYSV1iHvQhy8XJjAw0Y6vLtaqM03glt66HgTwLX8x//T
-AKNCd0Jj47SKmlZlBD1te4SSrzylJiRL4hYn0Gs0VE8y5hF5c0GCS/+rbQwPTUlC
-jVlxiBvPCVwzYyjeRNQQuOE9MFv4MvLJ/R4W7rUyf+G3wuOZySwZnHWJRwQOnwdc
-dILgdrYMWo8WDuCSbTez0iBoHxgVSgb7D4x+gjROx2ECgYEA52+tBZhoqpiNQQNT
-REH8ngSz5bVAwp9mvUQQb/rp4J+X3VptCrcg8krXOAFICHybKuZq0KFG/qZW7Aaz
-eeHyXfaNnobm6RNIMOPF/u6vFbjF+LK4+bV37tniZdTlqap5xV0iRWlFvTouERRe
-vhhx5qeUv1IVO11U4kqqSqzqk20CgYEAz3pIhDNI8y9r2GFaETjOrKkNd+HbOjeg
-4vewUHMpiATey/uWHLUYaRUPdKrjhJytNYaMtmfNSLnZGOkwGaZ9nPQ3tz2hkwAx
-smya0LUeZLZwbV65xlQ/VTwE7vYQhcUaN9LXu+a3JHS7iYZa0nyDcWZjqUuum3AI
-SeML7kthrukCgYAEpM4A9bzr52G2Mz3heb164jdF3awt+4rRM2NtC8jTlAA1FdJo
-S6JimjPVUNlEKlPmuXbdNs843teRptTFFk6Jqh/PVX+en59sM7XOVrW2pt16DwP9
-JrIAXejYCDyPefc1iZfdcq5OjLNN7m20PMNUUHqAmEIKzeWdIMhn9S5DxQKBgAyg
-EFGbHVFPDWz5X5W0sz7mep9U6xY71Fp7YAtFNr3ELhqTkKrmijm6wloDHB0xu4iK
-S32+C0vlo0RUzEyDSRmH8uv/oBll5aBJ45HXs6XFREojfYKIFeKF6HbyGkdJmzLS
-bbZNb8+UGuVfe59bXFRGOsJOjK2Av8BfRJaXl7YhAoGAbGO2av1IdTHz9eQPVX18
-muU/i6UvsIl4Rb9poJTuwO/I74pJHctwggnLVaj8I/xLzbO3D87CHV49NOLLtn1D
-VZUoodANrh7qfQAIbB5csSg2rQaID6kAQl1w4Os/3jwiRAmYkQ890wL+q0S68xG+
-V9n4HmUO0QV1gsutC1zq+D0=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCul4pkcbY3hc98
+DR2yj8+RubSKsajO47va1vOEy4v3HVo7/kFQg6GRWCTXWzb8uzsqq81a/5dGbqqr
+WjzwWKH4oyRrG7J9WDCukiWDqC0v1TkGgvdFYtNSJrmW4sOtsFLBteMbuaCpYBQ3
+DNpRjUazJTx0tZu5bb+MKAVHKNIlptXfZ1CGg8iQTb2e3iBoYFSknF/XdP8GUXU3
+PtdUpmx5yuwTBaFTExfdDj8uQuxz1xT1crYb/+akxMO6aqW6EFNFEvkFJWxu5GGn
+HqKBfgfsvKGrD4pA/W4kWwVo0wfQ6JdQdMjdOITDLLWUU5w/ecF2oYbSyfEw29n2
+ZzyMhIIzAgMBAAECggEBAJYVPOlFet9VztuTptDFoShjHBkRycw3plfj6ahhFWuT
+NutVkK32yZ/Yxqq+BZHSHXV8BGbSSidIoLv2MWZmuq2zCG5ue8wWUJrwlQENQ/LX
+d1LqqEdkQcZf9bAINTAM38cXZSiDNOIE7ru398CdEoaTXtnw9/G5ryx7hrX6GFQ2
+H0CWdsCXgiDm8if2PsBtWOZwpFjxEl0gCFR4l1pAqs/XzPP9HJ69nvKDXF+gjAVM
+qe4qfMT0sDUl2tjzG8A4tyET56bFO8C1R0R2+tWqBc4iCOwQiHQHQhwGtYD7qEGK
++ZxTKyt6Y8a85eERQm4t95nhTYkA/zGkwaQXVwhlnakCgYEA4yOH7Sf8+y6v/uOC
+ODirWJ6v5evCnhjYMvnaOaLzcvhOqsUvYWuyxtQcCMFvwN9wrdtnfsNe5rXPlShj
+7RROfAM6NcCvTaakeInvtyEnE04VanwpLnGckEZgCr7TG7lWwOcLrJh5Tox26841
+u10Lcb68nt9gOKvQ97IXRWnZ0q0CgYEAxMa+S+48jsoMR5PNCtT5jZ3UeIg3QDIZ
+5QGvI8bwJZOYZcRIXfDc+c00JKmOkd9K36cJJPaf5RCkiuqJPgmcfc7RUh8Jb+F4
+G32pgPlEUpqzV+1q45WwIg3oXBFgeUxs5w0DIBUneX9Tdw22cHGXJmr47OcJeeiN
+uZIbbkArJF8CgYA/iefzOpV1Ook3FzONQKUGBFYWTk5B5ZdNI0Gdj+zkQ3vWH5Ty
+fqsjHaC9/kahwJ+HsvGPr11z0nZANm1Fm8GcxVZaRQ2E/lHR+lwZcWe56cTp5dOr
+T1LJtTYWq2zou37+NWO5o2mDxJ1bt3KmeA/EgOPI2ZnUIJQzRhlmbIbnfQKBgDz6
+bbwv/potKLMBrVe++fqVv2L+q0h8fiPGatTGcGLkoyReOCLMYl4S5ia6WJEBxj0a
+kS3gM1qT1rmpxo/wAIvIDHvLXGxMTaEPRvjNxgtnH06PJ0GRgHx9HNVzGRddxJ2x
+HZfSlmIDQAUzvaaIvNNN8QfQ7NHXbBvmmBOJVRU3AoGAMqedJ8pnseyzSC2/c0Ps
+IPIISVimfFWA17rOGohVwCHkXIFAdPt3ntjSjKv0BmGzVB9arbdo1hVmsW+Rz7XC
+Nu8u9gNC6SRhHbFZ/ys2AYd7YZD5QQ85vDKE53w8WRatLP1JAm3SiU+5Ks3ZoFDO
+kRgRWrZACkM84BZZAUmfsXQ=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/server_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp.pem.digest.sha1
index 1afca61e9e0f4..8a7ad665ed0e2 100644
--- a/jstests/libs/ocsp/server_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/server_ocsp.pem.digest.sha1
@@ -1 +1 @@
-08FBF18FA233B42A1591279E979CFDD9DFB4977F
\ No newline at end of file
+8A8E33FE14DF4D8DDEE9CB28A69DCC2F26D4133F
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp.pem.digest.sha256
index 219b235711695..17cbef1241961 100644
--- a/jstests/libs/ocsp/server_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/server_ocsp.pem.digest.sha256
@@ -1 +1 @@
-5A5D30BEEF694D55BD4BD0FA5E699382C2B6DFBEF1EAF186146698EF2E1359D6
\ No newline at end of file
+E52DB1E98B2210DAFE8530E33C8DEA24ECA2D5D358D67BC0778F78B2F46A571D
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem b/jstests/libs/ocsp/server_ocsp_mustStaple.pem
index e277b30c9e06b..74476bf7bbb98 100644
--- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem
+++ b/jstests/libs/ocsp/server_ocsp_mustStaple.pem
@@ -1,52 +1,52 @@
-----BEGIN CERTIFICATE-----
-MIIEFzCCAv+gAwIBAgIEL05J0DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEFzCCAv+gAwIBAgIEPJLrJjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKUbaM1TrAvHpIVi/qLnAgMelQ
-lWu1sLh2kPnaXyIyfLij3KcEH2xeAdDlUfR3fVX10AawYOQix5hkOgD4Th8bBy4z
-m9q61e8aeBLHbMjXFcI4yXn7S+sBrUZzOfeKTHkAtjfmXn7zS9NMMYYQG9KEzwSl
-XdvktQyI2EzitrM+2gRY2f9abMndScE/1Y4EuNSWvAz0ln8NSQoKsxB/qGkSkbtw
-mgXrmxP79NGs93x9zDp4UIP9goYkRsmoKxdYrrwbibyYog7tMTpjpUaEce1kDiYl
-iZbyyKGX1M0LfDnEwlcwaDD7zMHgMwmKt4dDyi4yUey3LTFDxC4+0FQDrGMBAgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAMrvAIe4zQtig1aRLfcTYvf2d
+faCWNTPoKwnL6FKUYqupD4FBQ56ryVV37k5uXAiJwh4HnI2BNKtvZTpskWVmrw6S
+Y4dGmT6dbHU4cz830PK7tb6nhEkVeqobcwKvCpebudcUXJ4hoIM6uHN9htGmDYjy
+e/sRuuwTq+B7H7mh8Wo/RQ5C3N9DTXHuKuP5gR3c9o7MHqQ5qkaY5+kuB3afYF4u
+KtwU1ANUkE7iVR1TyZ0LvDBj7G7LkNkD25XIUg7Z9gQos4+eScN3E3wQOvfY7z+2
+EyLjX5AJ6ziwvglowqEhnqzCtfwneggUyTMKa6gMi2uHi9Q77R1G7Uq+dpqtAgMB
AAGjgcIwgb8wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
-BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSEaix3tjioPatVVkObBqSsU7G3XDA4
+BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBT6h8diKVNTYVy95EtxT+qshlpIMDA4
BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw
MC9zdGF0dXMwEQYIKwYBBQUHARgEBTADAgEFMBoGA1UdEQQTMBGCCWxvY2FsaG9z
-dIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAAotxVRA/9sEuLGa9sJE0ERhuN7yr
-GcWad5nkDdYSmg68GditxeUQDc86jACfU+/Dv7Ni+kvkPmlKrreThsyJbIix36LT
-bC21fouGk9wAszhc2ENZzR9bE6PB+qVwL6axVkS4XFyXrPZBxlL0R+WiNUOH0RZI
-leOlD1LuLEAejYg10q2sof3JpEGVjDzJMGqM1kaSTJ6R78CgkHdE0OlmLmJNknCl
-hvdK68W7dwCUyHoUX1VucnK3XowvsvWa0r1dp0yyFWvRbcglxI1lt1f5hfUZaNVD
-m0NfpxxjkpC1jzmS5K40Gt+XvXltBGCIUeFduMoqt0NHk698WoCd8LuZdw==
+dIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA4SbsqrppLAEH6HczQWF53KFIwTXK
+EbQSkHySWVHkQZd64rI4FtiubecsvorbpeQjnWTvpJPJmwNSmd2rp86D2bnCB8El
+2t2WUZH48+iCBcojeka7ppsU/B4y2I9qo+I18EZ3MvOxr3F78qSEiKkfE/hyJgNe
+f5yyX/MrpcyY70b58M6IazMBxDii/pDJIC1bMmwSvbKYxX4vZLbEA/npJNMUvfgS
+8oJLd6sEUPP/5Sy5XeO4rkq2UN0swtdCEOgBk93T3Pvofm91ZuA90/ep5/rDzPBe
+UehkzSMSGAKrv/DVG1Wkbl8nOZRN0n3CVI0kZ+cHDyC3MWaaI8Q07Xyi9g==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDKUbaM1TrAvHpI
-Vi/qLnAgMelQlWu1sLh2kPnaXyIyfLij3KcEH2xeAdDlUfR3fVX10AawYOQix5hk
-OgD4Th8bBy4zm9q61e8aeBLHbMjXFcI4yXn7S+sBrUZzOfeKTHkAtjfmXn7zS9NM
-MYYQG9KEzwSlXdvktQyI2EzitrM+2gRY2f9abMndScE/1Y4EuNSWvAz0ln8NSQoK
-sxB/qGkSkbtwmgXrmxP79NGs93x9zDp4UIP9goYkRsmoKxdYrrwbibyYog7tMTpj
-pUaEce1kDiYliZbyyKGX1M0LfDnEwlcwaDD7zMHgMwmKt4dDyi4yUey3LTFDxC4+
-0FQDrGMBAgMBAAECggEAMJNT6BZeB9544ZtH38sMgr5ZDU8C0FnAwD7orkR/Gm4V
-iso0k2nUA/IdrKFzt6ixFda/dGOfAra3YQEIMJXZJA0iS6PU7VzmDo3bvGPjCLkh
-q668CdjdcMagjpPRWjQoLUKSw03W3yVKcxXE4WM8tURbEjUp0YyimMyOETTabnFO
-LAkWun3UtQBYlvs8TdQkowYZsOXjEqVac2aynaOSWBhx1g63q6pwvs4AtLDfBGcN
-7QeL3xe0vvrEZh5hAV73NatZI5Xx8jzv8C47XgPlsLTzsFJYOgKAPyPNKzeV/lnL
-6rtQer8rbQwABOI1lMsM1eRmYm/FFZZB5d2srtvtnQKBgQD59/y3ZG7mDh44PUyJ
-OtFk7LV3m8vS07hv/6ofLycYiT2k8T6gJF9WdsccFUhUgmLP1MC6VFLWEGWBFY7J
-6FjEkyX50QKNVnq/p8j/kHSycJW1C3lMtUsukJAczCxhAKKG/tJkWNV4PSOLBiCD
-+qd3aja5ECiMvucTVTbRLdX+pwKBgQDPM2c71KmtCF9kgx2aYqaYRSB021QVyeq3
-eoLIzEvizehebtF4MGW5oLoS73b/rZwsBDHRImq7eK1gs8FMF68ZDcYhD/jVSsj9
-v/lPKkas6Kg4AgAkS0ZS/+nqhjOyeATmMps63HfMHVGQPL1/sr8mopnvtrtfbtyg
-G8xDS1uuFwKBgBj/fdxxRM3o94i0SKUopqwrJ/KwN+/7kGRS8xZvRr+jafDG13Z4
-bLhe5Iagcj1RVMRoCGYZ7LbPqPcByufSPp5aAOA80L6FuXzVMLquHZ2CuNYEMbtE
-HiKn/mGC4aVJxPcvIKc8YwzFQHq0wCeyt3CvxI05WnTI2p8KVJMSvGXdAoGAKWAr
-akAyTdJRMkIYhD7U461SKOOva9ZxX4hAW5aoRLc3grwAak7H0YSuA5/8FFx3xjZk
-OE32IE+d16StoeNM3OTqPqnw167iagGO6GcZy+d9yAlmO9koRuTBskpwQmBDIjDS
-3LMv3Pux0OrAMKLiFvX7ZWoJCqqZtgM3C+0ddqsCgYEA6pSgc3sHam8lXftWq9zq
-7YBhHmjfeGcDslz6NTekW3IrDrEcMpwZLqFxkXOMlM/bbLSGsz46xW8Nt8uJLN7h
-+q6RtUmgtlUUznTAsBD2aZNm7e5TUgWDK7VyTeDbd8ld7GLBInVEbplTfVAJ3fHC
-REyG1gCUF350GsgxXdsXmg0=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDAMrvAIe4zQtig
+1aRLfcTYvf2dfaCWNTPoKwnL6FKUYqupD4FBQ56ryVV37k5uXAiJwh4HnI2BNKtv
+ZTpskWVmrw6SY4dGmT6dbHU4cz830PK7tb6nhEkVeqobcwKvCpebudcUXJ4hoIM6
+uHN9htGmDYjye/sRuuwTq+B7H7mh8Wo/RQ5C3N9DTXHuKuP5gR3c9o7MHqQ5qkaY
+5+kuB3afYF4uKtwU1ANUkE7iVR1TyZ0LvDBj7G7LkNkD25XIUg7Z9gQos4+eScN3
+E3wQOvfY7z+2EyLjX5AJ6ziwvglowqEhnqzCtfwneggUyTMKa6gMi2uHi9Q77R1G
+7Uq+dpqtAgMBAAECggEASmYZKYm45Fz28GNjAtn/jgsVlpZfyGV132RxmV4oQUpk
+ur/GwBXF4SfBjvhPoga8Q19CMp4WZLKXvjpIQGiUAQi4slAWuGxvwY6PXbe2/AtF
+OwZidAQQKmFeYdvGaS3HfD+XE0m66LdytRNbmXbLxuSsY3k8uiNKaV76OY8RU68s
+5Kd9W0g+/TgasNmLR/VTvIjG9JMAZs7XP5ufPjzPdOktwyV4zndVfQOBLJoQbgVe
+4gN0NrKxXUvp3E3aoFEY4Zc/RTraF+M1G4oUrvGSzT2fHA0adE1mSl9RJn4dFmNS
+z+GJ2MRMFzBYXMJWPHyZAbnXhKJ6dONDnOwKHeix3QKBgQDsjQig8c9T6Y48KA+W
+rwkxZ0ZQP5y2MiAyk6BZS+QSWmX/9P2Q37C5efe+xD/shR8rlrakMo2D5Gprabjp
+jGpg+0wJHb+WC9RJlMuASNCIlhB22IkN2xTB3x/BVC6kOEWwIsMTjbfWXBgRFEs+
+I03mqyUE9OTUZLm+yUTOR+fCOwKBgQDQACfCq0fy2Y8tcv6NGgZ2gbtj14PJa8gO
+kSH+mpT/95fnIBu+894TC71NdwsJ6l4lS8lFfk5RMjHKAPItBkDuvP0kdjWqFmV7
+WYPkUSf1EguixmNrEIxUpykyYYreXiqGlVJu+Qzpi8Ommv03CpaFKfOAXvIgWa5L
+FXi0Y2WgNwKBgDo6LRmQ7Dv3ja95HP02cMjcZw5x2h1vEUXh2OKiL63k2p66q3/x
+0AqX30fx/to1moqcOrEIUJVasGdoSsASdE0TDpBf5j0FBGhoW/9j7RDx/3OBsZrb
+hqyCQ8rO8fwybdUBeYtioxH88V+i4zKUdiSFlEM4FOvDL4Wq/WjZgk7pAoGAAsr9
+va9vF+Oz+HhC0sI2tACp831BV8MKvKdWPYT12zoH4CKePMIpiTfvIssmasuq1/Vc
+joJTquNxp7S7i785v/rpq0OrFM2YLz/UdYxhbPkBDv5690URnVFhTDvjEXlSONxo
+bvUJJ1mpFuOd4s9RlhgqHN8pgSWzIW74X/O1uaECgYBDPHqAQkC0nr10CLca58e9
+qYVd4/vVrhbbnxJwyl9rjuBJVp/y9hssDNhDKiJuTMz3+4ds5XvZRQbWLIt/V9qr
+gT4mtqHAfk76Cjg0/RQcPUyNd4e4Gd/HObnF4lEC9z5j8zBN/DCwP/PewLJQYcri
+Q5gP/QsoVfc2J4pATQz/lw==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1
index b366ff279f9f3..5959c42e3ab8a 100644
--- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1
+++ b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1
@@ -1 +1 @@
-4B0BF83D70980E620C8D195C6979D3D6BE8323E7
\ No newline at end of file
+E36475B1125E0147754258152F35A57B85B6146D
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256
index edb2eb6b5c0a2..3a6fb59e452e8 100644
--- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256
+++ b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256
@@ -1 +1 @@
-BC9FAEBBDE885879057893157EDDB79EA405369F3E1A98AD69EC20CAB996C764
\ No newline at end of file
+5CA31EA44018281579AC45F1D58C0C2F17613635998216E87FAC70FDB40F1C74
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem b/jstests/libs/ocsp/server_ocsp_revoked.pem
index 96c6e71db498b..cb824644d0308 100644
--- a/jstests/libs/ocsp/server_ocsp_revoked.pem
+++ b/jstests/libs/ocsp/server_ocsp_revoked.pem
@@ -1,52 +1,52 @@
-----BEGIN CERTIFICATE-----
-MIIEBDCCAuygAwIBAgIETZmtxDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEBDCCAuygAwIBAgIEXojk4zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD
VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxzlF6pDVDoDTxU9mhC1nYgC7y
-NkQ2gozEValQXwF0ck4F4qMQVkrz4vFSEG5HPAykK5Yv6MQ6RcyEB1w5F+FrI6oj
-PexL6Pr/iQ8kWkNAPqIPZRXBlddri1butlDCTLPHXsU7bqdjFCAnlBKCRJJ4WqYF
-kfvtPAfZkN6s+0C4SXHi5+xY5d7NzDGzzWnhhcCsfOZJu6jpFRqoAV5CFvSsMvJF
-r51W8e9kf6sG1zEU/iel5rz3CJNPhPcARcPQ2+pZnRAMOKwmloe90le6QxYA/3tP
-DbxFn3lDZ4lBXB/t5M72LXQXiJIcorUV5cxoQ2W1jClzg6HUSfxSbCXoZz5HAgMB
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4z1jlUx5rFqNRnLeroecRO20h
+rWnFL+FQZUlDvRGpiUfVIK0vvTmQdEk690PHFEcnu7nKCVSxVrJLm9A6/SOidOSO
+BoZNkF4D/Q6kmKEad7T4xz/Ma1x4xmk+rkbTEAe3IpJEQ9pyMJ2iTzOEOKQ+08j8
+xca5No+V88OKnTtp8bk4MRykNceB+8GIqYNFt7ir9MoKEjJPbySZsXRtHXPuEOQd
+N+pWIdchDiidIpLihCviCCQO0n9q6mjZ7ev3nF7M2HzrMeEVkHdARVFWwqPh9tV8
+iB9299Sb/i/B2UKQe1CoRP1U6/Q7ElMYwFt5+CDpgMFBvx+7N2G/3OQFWqX1AgMB
AAGjga8wgawwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
-BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQnIxUeRtcliLQ8WYOTMPNUOfKVhzA4
+BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQvmFQ9FdhyABOmhW2ubwTx0cOTPTA4
BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw
MC9zdGF0dXMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
-CwUAA4IBAQC02XPR8svJ7M3F7pMrd2TWjdo5q6r6VuQ255k76hUWnyhFMO+rJAfE
-0qOwvTxSC3BTn7uopYgmtZ16IA2kXW99kM0khSOWS2rvrKwaXNH5N4m5vpJvvnYn
-4tRy+VN75HdUxIjGFPDv590yrc2rolGK/4BENErK+uQL1s64+6BGZHhxu67UQIni
-P2XtpNl7FOqqKTN1C36LNPz2WT+rNY6BArKeKNPrChkjjf05XDBazXHK0mWBL5Fb
-S4lw/5D+RuyC7IUxn3H8EKWXRhm8m2+PbtXjcTmXDf8MmGdmq5VcRPIKhMjrxX/T
-xZ7XdGSu/Q8KvsO3taacxoh4uqvX8kNu
+CwUAA4IBAQDYtwwX7/EO1/tSNSLF1cl+maF3NvNxxCdQGKooE1DCMwLHq4OnehQi
+pCaRD/uks4Kb/Ji/yWafkOOPT40UUa9KCVEs9XkZygSXamTwkm2FIn+gsxNpWeP0
+sprkljUPN640RNzQyInrRwPdGvvO/Mdv4WrLKiAg7TpHlynveRiu4+dk58nVSyUO
+Mi1TisTqsXDz6i6nXMgnt6b1JIy9xdAqONq98RkgcxbBQ3dYv8oU4YehKBO8e3RJ
+4nxhCHOD6pqNTCPD4LyiTOG28J1l4oCslGEjTXYHmebSRW6pYmJIbV8C+Zl7YK5X
+vIp7oO1z8YiA7Wf7fTobR0Xg5JmXAp/K
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDxzlF6pDVDoDTx
-U9mhC1nYgC7yNkQ2gozEValQXwF0ck4F4qMQVkrz4vFSEG5HPAykK5Yv6MQ6RcyE
-B1w5F+FrI6ojPexL6Pr/iQ8kWkNAPqIPZRXBlddri1butlDCTLPHXsU7bqdjFCAn
-lBKCRJJ4WqYFkfvtPAfZkN6s+0C4SXHi5+xY5d7NzDGzzWnhhcCsfOZJu6jpFRqo
-AV5CFvSsMvJFr51W8e9kf6sG1zEU/iel5rz3CJNPhPcARcPQ2+pZnRAMOKwmloe9
-0le6QxYA/3tPDbxFn3lDZ4lBXB/t5M72LXQXiJIcorUV5cxoQ2W1jClzg6HUSfxS
-bCXoZz5HAgMBAAECggEBAK+gGq3o9obXpZxiOn8tr2QANhjWMVUmHPe8/+zQFE5v
-dxvvMGB9TJM3Ee9435/9jXDv93V0qpFogNKkRedx4NfgX4KZRzcbULKo6caNiKrw
-0uQ7l0Gzg3MpEUrwQFffPfRgcQBprknYJEKa2ZakF34y2MLS9RepsknjXthk5Ozz
-gcg6jqa7JBl/Cu+d1CUetUDcz/Xizt5CCW0yZ3Qt+UgmyNRNgIuSAUFkoJRFwiBf
-8/cZ5fkhatck/vsdxwGzvP/oTR9P0AuMXUEBSj+KNMbiuVfeAg84T5Q533scU300
-lZCfQGsNDmTTTiTNqgqdP0d/6XgbQFhh+gBdhm8bHoECgYEA+d8UnCaG967ixP1x
-IIexVjABzMY5fdXjqJxmhJstU+jKZ7gATq6Pl7Qh8RgOfdXdJnD+17UtqAip8bvF
-dKdVYpHZ10nwvChQ6BnALSDGAqsfU3p8U0UEh1XLtMmPPaNPsuRtENRlHlQcrp4N
-pr7u/kWrDsNNJpExrq48rMF1m00CgYEA97yYavOWWNq+NBkvbxx14Ha0UZqkIfKp
-RbvFw6x6CMPsn5OAjklI44CpnLa7J+XiKiD0IWPkMO57jsaKc6Z/vUi5QX6/dwgD
-1W/CmGU7ZVXr23JYTitnECaF9AWMRIJ4yL6Z1OEkAPtexTLEtLgBT+tzupzpgchJ
-2wJ0yxcaLeMCgYEAiGYFOyQw4v3dnkj3Oxm3bWPxZ5YBhjZ++ui9cb+/o6Fc8/dW
-e50Al4BmUtSd/IUFHtnp01h8ntBz7JhitkYt7wvNDZ+4QQ4E9F4yMLBGRuigxhID
-0fTH/xSPiZXJko0WkYHuI5S9yiuCKLPwochyb+0Z7oogEGCL7V4BkxBcIbUCgYEA
-ggJWVqJYv26kLMEe1IeyEMca4ExwYGRxkuf05Dhqnpj7X89PwtwQxVurJ1P6KfkJ
-VKSQmclLYqZ07zugQwsYtGQq3IVAw53QLT2GeOV/YzHRIgwC7Zr3blFZMPOMZhO2
-gVsNbdttQpqoZIK9Gj4Kaj+dL0zTekl7ANVsJ3yLP5MCgYAuES4K0f1Gob9yT2VN
-OvvwoMxYlkPKrziLmJFtDGp5vl4VzTsnFfbw/ykccG6UXt5EWpWW0pI7olRp7cWD
-2nfCek9OkkMRl5KoQMhG+UPk8i0JS7+wEW1NirLwXEEwL/VDdwoj/llA7MWkr5Ug
-yxWrUGrJoSqX/nlkiSrarBkH/w==
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4z1jlUx5rFqNR
+nLeroecRO20hrWnFL+FQZUlDvRGpiUfVIK0vvTmQdEk690PHFEcnu7nKCVSxVrJL
+m9A6/SOidOSOBoZNkF4D/Q6kmKEad7T4xz/Ma1x4xmk+rkbTEAe3IpJEQ9pyMJ2i
+TzOEOKQ+08j8xca5No+V88OKnTtp8bk4MRykNceB+8GIqYNFt7ir9MoKEjJPbySZ
+sXRtHXPuEOQdN+pWIdchDiidIpLihCviCCQO0n9q6mjZ7ev3nF7M2HzrMeEVkHdA
+RVFWwqPh9tV8iB9299Sb/i/B2UKQe1CoRP1U6/Q7ElMYwFt5+CDpgMFBvx+7N2G/
+3OQFWqX1AgMBAAECggEAK7Hie73uODWPAbHC/1J1Nm7Ne6PrmRKb/A0miFyAe4wY
+rrxeFiZwIMZNtiJWJRCXTj6lS65vFsq/tv6s+fV14mguzaGfSWFzyQ+g0avAk2hw
+Ik3BuMw9fdSzd31vA2IL4PHMkVVVpZ/29TQyovVdZ/lc0N7WAF51Dmgm+HP6Yy67
+0tla/Bky4fvjy2MvSkxBT4TPqs+Co5ICj4xOABDOtMmsKMwLgqGHs7/K0+4GwFKr
+vrKt5DpAQmrrWbp6E6DyJLCb+GVTm4KFdbYUnJ/aV6RruxmUvIEIKjV4QYCPxmxC
+myVgoFeFgEditvmiTUYC+X5DRGngtsJc2OxLxG5j4QKBgQDd+12dbN1BkOoTFUWs
+Glol1kcLhzkUdy5Mx/KdcmBr2hi+ZtilXp+ydH64YZQolJ82rAxZe1ve/uQnQVec
+DbB6GSeWEV2T5GtYR1W1lOBvh8jGaGt5JcghjqZRHWKb6m0r86JkrLTZBzbVykwB
+bQzMFt06vMJbJBVVMl5Xf9Bh3QKBgQDVIa2um9fBWY31oxq0kMPlvjuT7NBWFRuN
+TF61d8Jr25eDoQ5b1n9x25J8P5LMeBERq9cz/AZ8ymiYhXEPOwImyzSEbrc9f9Fs
+fLw4qqFXgHKRNaD5BauqV2HGX3TUO0sCCXCn3IlJXegHhN7T9r4GgLR2w2to9+Xe
+LBkUSHDu+QKBgQDE3qjw9FrqSmZ+lDAdaaWnxvltTPkfoG+MowwV/RlsrZmiS746
+M8h4UX8NcWUf9Geb6kxtk80v4WlhMz9K5I0uTF7iZfZTMpLNUT6/cxD1eTxkUrl2
+UT7EHCp+6AvLccZu432TweQGUrKcOvTbkuZ1npWIAV5xzHeq/qfS/EVIdQKBgE3/
+dUBhaCQqUfmDJ1LNZ+O8KrGICPZ34w1i1YYzvcMbKefSGR8i8KEKaiFuYprRle2R
+pcAwi28/4+dtbmLUhYUM+Ls+iXDIRiwdugRC7ajIKSVbKv33RUuhjva6GFdTG7Az
+JDdKTjO7Wh7mUWXg9soKcADGo5hWoJkeKk9x82KBAoGAPh14iiYunVA+zgJU3opM
+kL/bZmxWD/ATEaRiVHDCZ/jBVcU/ywwzt2niyQdx1Taybb8IRT6mA6nTkxnnYIQS
+zdT1Zabo3HAYAFwXPIi2TgQ8eCjSiRE5VYbaxfLqcVIpj27bGtrzz9aHIw+ci+ga
+pBVHu0GJc/4LiGs4NJpj3Es=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1
index a16c01face3ef..75795fd866804 100644
--- a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1
+++ b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1
@@ -1 +1 @@
-5DDD9151B27748CE669027BE752EBD7FEBF18392
\ No newline at end of file
+A96EFE717775551966C031244B7C6412FA8D3E66
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256
index 7fcefa06e76f3..71085ab2d5c28 100644
--- a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256
+++ b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256
@@ -1 +1 @@
-FC98F8A109CC247A33B8194C6482D93DF966983C5F6269783F2D3EEEBDC3EFD7
\ No newline at end of file
+8A1618C8FBEBB727C367D5124AA77FAEA8AA09C9C8CBADC8E5DFD38C399F1B67
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem
index dbcd621d38691..0d8c990481b93 100644
--- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem
+++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem
@@ -1,53 +1,53 @@
-----BEGIN CERTIFICATE-----
-MIIELzCCAxegAwIBAgIEUkVd6jANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
+MIIELzCCAxegAwIBAgIEJPVjeTANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt
-ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0
-N1owgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
+ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIzMDYwOTE0Mjg0NVoXDTI1MDkxMDE0Mjg0
+NVowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs
MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs91RJ/ZwqHuim2gGkgsDFa8WkaEGj070
-gxP0sW15N+IvsNXyHx43KTyN7B117hgRDS/nKQHUyX8g044dBvmAmmOqyXfsXA6j
-TXUXovcx/IMSlxwgx9D1mrhNOxSrsUFE2sb+5Ibp9f/ORL5bU6dE6iMqJMldUQcr
-VrlE4oiq6y8T9ykArP9zTpcddARPTbUoOGDuhoVS/cjvNgFdcsR0L4LX6r7wKJ03
-H5L9k43GxXjyjtnXtSFjG7YVAwWU/FoXpqngn3okqNIgSLZoCAM06E+TLCapJjOp
-110HgGLoQmkrvfpjKTmfhsVVGR4WquQOqzr4BLZVB9qP9JTqwuK99QIDAQABo4Gv
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvPjzpFEZvcBHspirINBNt1aoaO1k/n+E
+BGm4Vxwz2F7LfjiCOPXmb0Z6vPW8Xf5rnmanrxei6obTD4eCF5SodSrCSrV3ccG/
+KRgEOPjMJZc6/0IY2jrLqVDAnIzz6sQd2GayQNLU88CmuYnZ2Nzah38TTl5ppnrj
+oy9qC6Xy7sFkHN7gQLLG9Am/gt15hm+9HDSvxir7mRlRKn9FskhJcYz8svKalxKW
+DNekW9CJ/z0CTxrFM0/DAc34GB69Og1VuJIrSjexY3DbltivEZFCduciM09gOVDp
+TlsFtmEpYu4i9LMWXWsa5ks+SPMqhnHoBCQHQ4Gb1vwQeiV5ok42OwIDAQABo4Gv
MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB
-BggrBgEFBQcDAjAdBgNVHQ4EFgQUl1R5DS4QffEZ0Tb/zR51ubuiP6kwOAYIKwYB
+BggrBgEFBQcDAjAdBgNVHQ4EFgQUhV3aQ2Q65JKAn7LJq9aVEAntaMwwOAYIKwYB
BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh
dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC
-AQEA0PM+cliksO+QdC6fY3kCWIPX+rcKO0aqkw0ib2Tt55uHZR1xAiX5Go1cR0Xj
-etgzLl9zKZhx1ybkLgnAo2psPx0PIZc0hoSRSJMm5v06jKl9zuQ1PU81HHAQmnbh
-+Ut54tVXsl3e2Si/0YLVKQCVFXcn7cqZqUTu5mcaRSlLm5pqAj4n95C+STwYUEuc
-FZaZ5mYNqu2KBqZ3v5QVE7/qAPf3CVCmhi0bKJ8i1vssg8oIqqVXK3nrnhEo0dFU
-QaOYMPWfntVwX3EIvVY8gMyFvzkxSz8dfA3ep/OxQoRECSq41GDbfIQMty1pnA3B
-jOO8x5hcv2ibfukDCLIQkKeP6g==
+AQEAa+Shlr1ZXFLZ9xKv8irm8sPaWwlcos8WOBDbbQ0hzzAcTaHFGR6aAzYK07qV
+1Bht3/yRFjEYuRrx3rZUtMhgS32T59h/zX9VWsSset9XR+qxdPqsO/WjePEyfHhg
+Lt7FBAUQ/Cbf8FyiDY07f5R4adCUtHJk/1becD2+xdXo/zw2XezIIw4Quyfbk+Sy
+G7O1oLzre7EQ/2RYgsDNuck7UvMOhyZ9j0V9PhLtIymjkccjNp6K0C6H9rxKYJck
+r1QHPQO4ghjyJawTTwNxkbztK2ABSM0IV3pIuKrj6rtqgblBJ9wkhOOvsLft9WK0
+sZPO8N6G9bUmuDcHn5HGKeLZBA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCz3VEn9nCoe6Kb
-aAaSCwMVrxaRoQaPTvSDE/SxbXk34i+w1fIfHjcpPI3sHXXuGBENL+cpAdTJfyDT
-jh0G+YCaY6rJd+xcDqNNdRei9zH8gxKXHCDH0PWauE07FKuxQUTaxv7khun1/85E
-vltTp0TqIyokyV1RBytWuUTiiKrrLxP3KQCs/3NOlx10BE9NtSg4YO6GhVL9yO82
-AV1yxHQvgtfqvvAonTcfkv2TjcbFePKO2de1IWMbthUDBZT8WhemqeCfeiSo0iBI
-tmgIAzToT5MsJqkmM6nXXQeAYuhCaSu9+mMpOZ+GxVUZHhaq5A6rOvgEtlUH2o/0
-lOrC4r31AgMBAAECggEAKmIFZKhSpbD0gTBDFBjaMjre05QhKpXNUjHaoBCO1tdK
-q53OfvysvN/TBYdvqZosMnJOQ0B2NsDpKFC9kE4surSvoufKX2rnmjH0Tud29Oyh
-7c+n/c3Egh/ZuwlE5/DcW702hP0xllEf9tzwQkcRGycMIDz+60AIO1hO3h6cP+yj
-ko174JxUGb3DQpEjbkUQ8kWYSAA9adpepX9Qtk3u/e4OZdCj+YJ/rGmvCUqOHHFO
-ZvdYADVNNHvaZ1jlXufr9C2C/8eM6j7vkRZI4e0UznIqQGn8O45H27wgnMy4P6HZ
-zBzoTAla/f9QcUP1TuAy/yzmSFvhcn8Qic9hcfkmQQKBgQDoRSdQB/bf3p1cYNE6
-Yu0qSA+a8PFQKtmJX5lK5cn6N14a4zhSUBubiMkv632ygq2P7KtsALI/ehLr+lxE
-0N7a7X+ESuhtMry3JKAoEomZGupfbdi0OU6PgapWC21w6Y6cSfsQOtqWvJgfa2gW
-6KLnwFlWb3hhCmXm/namIJ0rBQKBgQDGPYj38O/DyqeQoZtRiGNQ0Y7mtnEgPyup
-8Dv5U6CoJ8TI5pYnHjz9/fmJtRljiFhThvuHlA3XZZxZVg4KTPqVsN6EfedCbi3I
-Og3URUcaRtsrcfrLmzYKvEb8b66McaKFzkwzom/1hXWgsM3gjAF/hA4uzIYdyo44
-p1hiFeMaMQKBgQDXozw4ROyiMt8NYKVe+3EMDBLQ2lhvARktPJ/otSWrM7Qeak1j
-vhOjdn6yCoOMM15HfIY5ovvZis/+XVVEXlZIEq6Md68Jkk06CrrV+T/d8Ose5bCG
-wZ16BfvKHpngdjV8TALWso013KmuodzlR93WIvHOGXc9QJYSurQUz7qasQKBgBrK
-jC0+AZlLVRQF0zDUpt9wQsjJVUaPYv5HekN62tZ+8WhZSWel/YYyAbxrni/GaHF5
-Z5ruFmTK1bN5HtKjjqYWTixHu65Np6BMwDu01SQm+U3IzKUhp88RnOJW0ZcncFh3
-BfNge5MJ41jBLEGxii5KkYQfnex+yHO9ogM3hAaxAoGBAM8tFeQID9C71X1Kh37+
-SPypUqfUUIsD5zdimOVmfrDxUmzJ8u6BRLIv+nQ1CYxaGbIvv9nTjo8zPmf4QBqF
-XA7L0jr/ahUGtDh9eH4HdzReAMcF3T+a0pi423OjW1l3jiDYZtcwznA2C/0DYKGv
-s2/1z/Pi3w7kVEJsWfMJgMUy
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC8+POkURm9wEey
+mKsg0E23Vqho7WT+f4QEabhXHDPYXst+OII49eZvRnq89bxd/mueZqevF6LqhtMP
+h4IXlKh1KsJKtXdxwb8pGAQ4+Mwllzr/QhjaOsupUMCcjPPqxB3YZrJA0tTzwKa5
+idnY3NqHfxNOXmmmeuOjL2oLpfLuwWQc3uBAssb0Cb+C3XmGb70cNK/GKvuZGVEq
+f0WySElxjPyy8pqXEpYM16Rb0In/PQJPGsUzT8MBzfgYHr06DVW4kitKN7FjcNuW
+2K8RkUJ25yIzT2A5UOlOWwW2YSli7iL0sxZdaxrmSz5I8yqGcegEJAdDgZvW/BB6
+JXmiTjY7AgMBAAECggEAXpmiHpga8srXGakjwgGFZkqTdDOjY2QiirMB/Vm4+pA5
+/q37QtiWyw3VU9MQwV0kMt/hAd4rSIzGC/giP/vDeSQ2r3+4k9ISCFhlhL/IMulK
+N463MkhMvC4YvkYxtU0IQ9TlV140DPJchHVmARJOs5YB7DGHYjgSzRZnV/1zZ+G8
+HxjeBRdKjQBthQSid0CPg+YlT/LgFLuoHyMBH/K3ZOHUpZiCPWPwr3MbA3KM6Zvo
+2lCurzzsaURkqlyd6wrBoZyAKcyQsKRtRAuynAQ1NOEKJPuhYt46vROreHDhsI7G
+qiKypV1kzyLp9oL02gddMHXa/4OF+F3jr0JLGty4gQKBgQDg5AJK9XDm71shqKfi
+zCel2R5Oj9SsvGb4wximIKalCwYN3S2t8lJBu1dOXx1hthcFrexKYbNIdQCompYP
+OYJrXvCWS5Vixn1Qip1f895BUIux0HvG0d5vBmAPeiIF64JXUg66wO8xDzgVjAbM
+ZaNXyqpvW+BgPjhQ2Seo2gyB6QKBgQDXHPtRdIpaFfNDW5jYveDp+FJsBsTMRknf
+V6VLdgKXa2L3BFwNvpvjjfXcVZw+udEiOPv4mnZ/Hv4EZ30BF0J3pL3YnOdkC9ao
+QiiHrJi262Fge8BCtYrXdEBPeCO3xjI/LLZ8Yh4S0TWLyx4X3MtiyAb0z9ueYh0b
+mfQPGnhcgwKBgGHSbDjk2E5rkS1r6lMZ0KkwbyFnKPBWJ0hPvLoOe85QpDqFmegO
+/r6+2bQqZWJqTUtYIu8b3ltHIZk1XM6Uimlf67DEgd9sTXgqQ1hIgMXSXmFjOWP7
+SBDtKf2xd3kxvwlylRug7qpdX1zoAtEH6Ow4KLi1szzJ878fJkQpTMV5AoGAeBz0
+r8z1G8w54xiYRh3wInTIAQ3egXmV8iHMaFBirg4GWouSoxIVlYBFElyzO8sh3YwY
+Ff6Zv+2cRPVMNXm5YXkZkIQ8J/78Q0N9whQ7yZew7eDrv9QCzL9a0YTx3MHMeL4M
+v3NB+5vZ3E70ZLqizmuGhtgxd0StoCXcwzoyjuUCgYArVGS5ZYSlujndoIvmBmJs
+4jSG9sRJmyY/8JGyXKQpEjboAo/0vbT2ld6rvPaDfajzSQbmZHVMtZHxfA2t+3Tg
+IrehJ8U3SgB1GGLDIBs9l0ydIWw4SHbO4LziYXsv7C1apjz/9dIPO0cQLOcdibvP
+xuHjgAc+f0lPvcM4ciTcEg==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1
index cacb2b7e84946..df2230ab416b5 100644
--- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1
+++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1
@@ -1 +1 @@
-0FD31772F5F6802195A98A4F06A4F347047BC7EA
\ No newline at end of file
+1025010C4C3F520A1B9AF30416135B30C7DF0E54
\ No newline at end of file
diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256
index f7c96ead3942f..a96c4b6eb25c6 100644
--- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256
+++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256
@@ -1 +1 @@
-342CA437B1471B4A8D4231604B4D42435E0C0E4E738B498F6FDDEE7F4151F7C4
\ No newline at end of file
+77672A5A46B5D04B099CE011203DE3CB78853AF0F65688E3A575586FC72174CC
\ No newline at end of file
diff --git a/jstests/libs/ocsp_server_intermediate_appended.pem b/jstests/libs/ocsp_server_intermediate_appended.pem
deleted file mode 100644
index 317bb9ecb14b9..0000000000000
--- a/jstests/libs/ocsp_server_intermediate_appended.pem
+++ /dev/null
@@ -1,26 +0,0 @@
-
------BEGIN CERTIFICATE-----
-MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
-UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt
-ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1
-MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
-TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs
-MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg
-rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5
-lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm
-kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E
-v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp
-3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv
-MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB
-BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB
-BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh
-dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC
-AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu
-bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG
-FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ
-5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV
-yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi
-hG10rdLfnQWn2vpYKU5b3Vo1yg==
------END CERTIFICATE-----
diff --git a/jstests/libs/optimizer_utils.js b/jstests/libs/optimizer_utils.js
index e4c8b14212258..e0a61618a68e8 100644
--- a/jstests/libs/optimizer_utils.js
+++ b/jstests/libs/optimizer_utils.js
@@ -1,18 +1,27 @@
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage, isAggregationPlan} from "jstests/libs/analyze_plan.js";
/**
- * Utility for checking if the query optimizer is enabled.
+ * Utility for checking if the Cascades optimizer code path is enabled (checks framework control).
*/
-function checkCascadesOptimizerEnabled(theDB) {
- const param = theDB.adminCommand({getParameter: 1, featureFlagCommonQueryFramework: 1});
- return param.hasOwnProperty("featureFlagCommonQueryFramework") &&
- param.featureFlagCommonQueryFramework.value;
+export function checkCascadesOptimizerEnabled(theDB) {
+ const val = theDB.adminCommand({getParameter: 1, internalQueryFrameworkControl: 1})
+ .internalQueryFrameworkControl;
+ return val == "tryBonsai" || val == "tryBonsaiExperimental" || val == "forceBonsai";
+}
+
+/**
+ * Utility for checking if the Cascades optimizer feature flag is on.
+ */
+export function checkCascadesFeatureFlagEnabled(theDB) {
+ const featureFlag = theDB.adminCommand({getParameter: 1, featureFlagCommonQueryFramework: 1});
+ return featureFlag.hasOwnProperty("featureFlagCommonQueryFramework") &&
+ featureFlag.featureFlagCommonQueryFramework.value;
}
/**
* Given the result of an explain command, returns whether the bonsai optimizer was used.
*/
-function usedBonsaiOptimizer(explain) {
+export function usedBonsaiOptimizer(explain) {
if (!isAggregationPlan(explain)) {
return explain.queryPlanner.winningPlan.hasOwnProperty("optimizerPlan");
}
@@ -31,7 +40,7 @@ function usedBonsaiOptimizer(explain) {
*
* This is useful for finding the access path part of a plan, typically a PhysicalScan or IndexScan.
*/
-function leftmostLeafStage(node) {
+export function leftmostLeafStage(node) {
for (;;) {
if (node.queryPlanner) {
node = node.queryPlanner;
@@ -55,7 +64,7 @@ function leftmostLeafStage(node) {
/**
* Retrieves the cardinality estimate from a node in explain.
*/
-function extractLogicalCEFromNode(node) {
+export function extractLogicalCEFromNode(node) {
const ce = node.properties.logicalProperties.cardinalityEstimate[0].ce;
assert.neq(ce, null, tojson(node));
return ce;
@@ -64,7 +73,7 @@ function extractLogicalCEFromNode(node) {
/**
* Get a very simplified version of a plan, which only includes nodeType and nesting structure.
*/
-function getPlanSkeleton(node, options = {}) {
+export function getPlanSkeleton(node, options = {}) {
const {extraKeepKeys = [], keepKeysDeep = [], printFilter = false, printLogicalCE = false} =
options;
@@ -111,7 +120,7 @@ function getPlanSkeleton(node, options = {}) {
* This is completely ignorant of the structure of a query: for example if there
* are literals match the predicate, it will also match those.
*/
-function findSubtrees(tree, predicate) {
+export function findSubtrees(tree, predicate) {
let result = [];
const visit = subtree => {
if (typeof subtree === 'object' && subtree != null) {
@@ -133,7 +142,7 @@ function findSubtrees(tree, predicate) {
return result;
}
-function printBound(bound) {
+export function printBound(bound) {
if (!Array.isArray(bound.bound)) {
return [false, ""];
}
@@ -156,7 +165,7 @@ function printBound(bound) {
return [true, result];
}
-function prettyInterval(compoundInterval) {
+export function prettyInterval(compoundInterval) {
// Takes an array of intervals, each one applying to one component of a compound index key.
// Try to format it as a string.
// If either bound is not Constant, return the original JSON unchanged.
@@ -189,7 +198,7 @@ function prettyInterval(compoundInterval) {
return result.trim();
}
-function prettyExpression(expr) {
+export function prettyExpression(expr) {
switch (expr.nodeType) {
case 'Variable':
return expr.name;
@@ -228,7 +237,7 @@ function prettyExpression(expr) {
}
}
-function prettyOp(op) {
+export function prettyOp(op) {
// See src/mongo/db/query/optimizer/syntax/syntax.h, PATHSYNTAX_OPNAMES.
switch (op) {
/* comparison operations */
@@ -280,7 +289,7 @@ function prettyOp(op) {
* Helper function to remove UUIDs of collections in the supplied database from a V1 or V2 optimizer
* explain.
*/
-function removeUUIDsFromExplain(db, explain) {
+export function removeUUIDsFromExplain(db, explain) {
const listCollsRes = db.runCommand({listCollections: 1}).cursor.firstBatch;
let plan = explain.queryPlanner.winningPlan.optimizerPlan.plan.toString();
@@ -291,7 +300,7 @@ function removeUUIDsFromExplain(db, explain) {
return plan;
}
-function navigateToPath(doc, path) {
+export function navigateToPath(doc, path) {
let result;
let field;
@@ -310,15 +319,15 @@ function navigateToPath(doc, path) {
}
}
-function navigateToPlanPath(doc, path) {
+export function navigateToPlanPath(doc, path) {
return navigateToPath(doc, "queryPlanner.winningPlan.optimizerPlan." + path);
}
-function navigateToRootNode(doc) {
+export function navigateToRootNode(doc) {
return navigateToPath(doc, "queryPlanner.winningPlan.optimizerPlan");
}
-function assertValueOnPathFn(value, doc, path, fn) {
+export function assertValueOnPathFn(value, doc, path, fn) {
try {
assert.eq(value, fn(doc, path));
} catch (e) {
@@ -328,15 +337,15 @@ function assertValueOnPathFn(value, doc, path, fn) {
}
}
-function assertValueOnPath(value, doc, path) {
+export function assertValueOnPath(value, doc, path) {
assertValueOnPathFn(value, doc, path, navigateToPath);
}
-function assertValueOnPlanPath(value, doc, path) {
+export function assertValueOnPlanPath(value, doc, path) {
assertValueOnPathFn(value, doc, path, navigateToPlanPath);
}
-function runWithParams(keyValPairs, fn) {
+export function runWithParams(keyValPairs, fn) {
let prevVals = [];
try {
@@ -371,7 +380,7 @@ function runWithParams(keyValPairs, fn) {
}
}
-function round2(n) {
+export function round2(n) {
return (Math.round(n * 100) / 100);
}
@@ -379,7 +388,7 @@ function round2(n) {
* Force cardinality estimation mode: "histogram", "heuristic", or "sampling". We need to force the
* use of the new optimizer.
*/
-function forceCE(mode) {
+export function forceCE(mode) {
assert.commandWorked(
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"}));
assert.commandWorked(
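
The hunks above convert jstests/libs/optimizer_utils.js from a load()-style script into an ES module, so every helper becomes an explicit export. A minimal sketch of how a consuming test might use the exported helpers after this change; the collection name and pipeline are illustrative only, not taken from this patch:

// Hypothetical consumer test, assuming the exports shown in the diff above.
import {
    checkCascadesOptimizerEnabled,
    usedBonsaiOptimizer,
} from "jstests/libs/optimizer_utils.js";

// Skip the test unless one of the Bonsai framework-control modes is active.
if (!checkCascadesOptimizerEnabled(db)) {
    jsTestLog("Skipping test: Cascades optimizer is not enabled");
    quit();
}

// Verify that an aggregation actually went through the Bonsai optimizer.
const explain = db.example_coll.explain().aggregate([{$match: {a: 1}}]);
assert(usedBonsaiOptimizer(explain), tojson(explain));
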
diff --git a/jstests/libs/os_helpers.js b/jstests/libs/os_helpers.js
index 60a52a31eccf2..0182bd21ec70a 100644
--- a/jstests/libs/os_helpers.js
+++ b/jstests/libs/os_helpers.js
@@ -6,19 +6,93 @@ function isLinux() {
return getBuildInfo().buildEnvironment.target_os == "linux";
}
-function isRHEL8() {
- if (_isWindows()) {
- return false;
+function isMacOS() {
+ return getBuildInfo().buildEnvironment.target_os == "macOS";
+}
+
+// See "man 5 os-release" for documentation
+function readOsRelease() {
+ try {
+ const os_release = cat("/etc/os-release");
+
+ let lines = os_release.split("\n");
+
+ let tags = {};
+
+ for (let line of lines) {
+ let vp = line.replaceAll("\"", "").split("=");
+ tags[vp[0]] = vp[1];
+ }
+
+ return tags;
+ } catch {
+ // ignore
}
+ assert(!isLinux(), "Linux hosts should always have /etc/os-release.");
+
+ return {};
+}
+
+/**
+ * Check if the Linux OS matches the given identifier. Identifiers are always lower-case strings.
+ *
+ * @param {string} distro ID of the distro in os-release
+ * @returns True if the ID field in os-release matches the given distro.
+ */
+function isDistro(distro) {
+ let tags = readOsRelease();
+ return tags.hasOwnProperty("ID") && tags["ID"] === distro;
+}
+
+/**
+ * Check if the Linux OS matches the given identifier and an exact version. Do not use this for
+ * matching major versions like RHEL 8; use isRHELMajorVerison for that.
+ *
+ * @param {string} distro ID of the distro in os-release
+ * @returns True if both the ID and VERSION_ID fields in os-release match.
+ */
+function isDistroVersion(distro, version) {
+ let tags = readOsRelease();
+ return tags.hasOwnProperty("ID") && tags["ID"] === distro &&
+ tags.hasOwnProperty("VERSION_ID") && tags["VERSION_ID"] === version;
+}
+
+/**
+ * Check if the OS is RHEL with the given major version (e.g. 7, 8, or 9).
+ * @param {string} majorVersion Major version to match, e.g. "8".
+ * @returns True if, for majorVersion "8", the reported version is 8.1, 8.2, etc.
+ */
+function isRHELMajorVerison(majorVersion) {
+ let tags = readOsRelease();
+ return tags.hasOwnProperty("ID") && tags["ID"] === "rhel" &&
+ tags.hasOwnProperty("VERSION_ID") && tags["VERSION_ID"].startsWith(majorVersion);
+}
+
+/**
+ * Example:
+NAME="Red Hat Enterprise Linux"
+VERSION="8.7 (Ootpa)"
+ID="rhel"
+ID_LIKE="fedora"
+VERSION_ID="8.7"
+PLATFORM_ID="platform:el8"
+PRETTY_NAME="Red Hat Enterprise Linux 8.7 (Ootpa)"
+ANSI_COLOR="0;31"
+CPE_NAME="cpe:/o:redhat:enterprise_linux:8::baseos"
+HOME_URL="https://www.redhat.com/"
+DOCUMENTATION_URL="https://access.redhat.com/documentation/red_hat_enterprise_linux/8/"
+BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 8"
+REDHAT_BUGZILLA_PRODUCT_VERSION=8.7
+REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+REDHAT_SUPPORT_PRODUCT_VERSION="8.7"
+ */
+function isRHEL8() {
// RHEL 8 disables TLS 1.0 and TLS 1.1 as part their default crypto policy
// We skip tests on RHEL 8 that require these versions as a result.
- const grep_result = runProgram('grep', 'Ootpa', '/etc/redhat-release');
- if (grep_result == 0) {
- return true;
- }
-
- return false;
+ return isRHELMajorVerison("8");
}
function isSUSE15SP1() {
@@ -38,69 +112,100 @@ function isSUSE15SP1() {
}
function isUbuntu() {
- if (_isWindows()) {
- return false;
- }
-
// Ubuntu 18.04 and later compiles openldap against gnutls which does not
// support SHA1 signed certificates. ldaptest.10gen.cc uses a SHA1 cert.
- const grep_result = runProgram('grep', 'ID=ubuntu', '/etc/os-release');
- if (grep_result == 0) {
- return true;
- }
-
- return false;
+ return isDistro("ubuntu");
}
+/**
+ * Example:
+NAME="Ubuntu"
+VERSION="18.04.6 LTS (Bionic Beaver)"
+ID=ubuntu
+ID_LIKE=debian
+PRETTY_NAME="Ubuntu 18.04.6 LTS"
+VERSION_ID="18.04"
+HOME_URL="https://www.ubuntu.com/"
+SUPPORT_URL="https://help.ubuntu.com/"
+BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+VERSION_CODENAME=bionic
+UBUNTU_CODENAME=bionic
+ */
function isUbuntu1804() {
- if (_isWindows()) {
- return false;
- }
-
// Ubuntu 18.04's TLS 1.3 implementation has an issue with OCSP stapling. We have disabled
// stapling on this build variant, so we need to ensure that tests that require stapling
// do not run on this machine.
- const grep_result = runProgram('grep', 'bionic', '/etc/os-release');
- if (grep_result === 0) {
- return true;
- }
-
- return false;
+ return isDistroVersion("ubuntu", "18.04");
}
function isUbuntu2004() {
- if (_isWindows()) {
- return false;
- }
-
// Ubuntu 20.04 disables TLS 1.0 and TLS 1.1 as part their default crypto policy
// We skip tests on Ubuntu 20.04 that require these versions as a result.
- const grep_result = runProgram('grep', 'focal', '/etc/os-release');
- if (grep_result == 0) {
- return true;
- }
+ return isDistroVersion("ubuntu", "20.04");
+}
- return false;
+/**
+ * Example:
+PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"
+NAME="Debian GNU/Linux"
+VERSION_ID="12"
+VERSION="12 (bookworm)"
+VERSION_CODENAME=bookworm
+ID=debian
+HOME_URL="https://www.debian.org/"
+SUPPORT_URL="https://www.debian.org/support"
+BUG_REPORT_URL="https://bugs.debian.org/"
+ */
+function isDebian() {
+ return isDistro("debian");
}
-function isDebian10() {
- if (_isWindows()) {
- return false;
- }
+/**
+ * Example:
+NAME="Fedora Linux"
+VERSION="38 (Workstation Edition)"
+ID=fedora
+VERSION_ID=38
+VERSION_CODENAME=""
+PLATFORM_ID="platform:f38"
+PRETTY_NAME="Fedora Linux 38 (Workstation Edition)"
+ANSI_COLOR="0;38;2;60;110;180"
+LOGO=fedora-logo-icon
+CPE_NAME="cpe:/o:fedoraproject:fedora:38"
+DEFAULT_HOSTNAME="fedora"
+HOME_URL="https://fedoraproject.org/"
+DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f38/system-administrators-guide/"
+SUPPORT_URL="https://ask.fedoraproject.org/"
+BUG_REPORT_URL="https://bugzilla.redhat.com/"
+REDHAT_BUGZILLA_PRODUCT="Fedora"
+REDHAT_BUGZILLA_PRODUCT_VERSION=38
+REDHAT_SUPPORT_PRODUCT="Fedora"
+REDHAT_SUPPORT_PRODUCT_VERSION=38
+SUPPORT_END=2024-05-14
+VARIANT="Workstation Edition"
+VARIANT_ID=workstation
+ */
+function isFedora() {
+ return isDistro("fedora");
+}
- // Debian 10 disables TLS 1.0 and TLS 1.1 as part their default crypto policy
- // We skip tests on Debian 10 that require these versions as a result.
- try {
- // this file exists on systemd-based systems, necessary to avoid mischaracterizing debian
- // derivatives as stock debian
- const releaseFile = cat("/etc/os-release").toLowerCase();
- const prettyName = releaseFile.split('\n').find(function(line) {
- return line.startsWith("pretty_name");
- });
- return prettyName.includes("debian") &&
- (prettyName.includes("10") || prettyName.includes("buster") ||
- prettyName.includes("bullseye"));
- } catch (e) {
- return false;
- }
+/**
+ * Note: Amazon 2022 was never released for production. It became Amazon 2023.
+ *
+ * Example:
+NAME="Amazon Linux"
+VERSION="2022"
+ID="amzn"
+ID_LIKE="fedora"
+VERSION_ID="2022"
+PLATFORM_ID="platform:al2022"
+PRETTY_NAME="Amazon Linux 2022"
+ANSI_COLOR="0;33"
+CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2022"
+HOME_URL="https://aws.amazon.com/linux/"
+BUG_REPORT_URL="https://github.com/amazonlinux/amazon-linux-2022"
+*/
+function isAmazon2023() {
+ return isDistroVersion("amzn", "2022") || isDistroVersion("amzn", "2023");
}
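
The rewritten os_helpers.js replaces the per-distro grep calls with a single /etc/os-release parser plus small predicate helpers. A short sketch of how the new predicates compose in a test, assuming the helpers defined above are loaded; the skip message and PRETTY_NAME lookup are illustrative only:

// Both RHEL 8 and Ubuntu 20.04 disable TLS 1.0/1.1 in their default crypto policy.
if (isRHELMajorVerison("8") || isDistroVersion("ubuntu", "20.04")) {
    jsTestLog("Skipping TLS 1.0 coverage on this platform");
    quit();
}

// readOsRelease() exposes the raw key/value pairs when a finer-grained check is needed.
const tags = readOsRelease();
print("Running on: " + (tags["PRETTY_NAME"] || "unknown"));
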
diff --git a/jstests/libs/override_methods/check_metadata_consistency.js b/jstests/libs/override_methods/check_metadata_consistency.js
index 9418d098442be..46e8651f797bc 100644
--- a/jstests/libs/override_methods/check_metadata_consistency.js
+++ b/jstests/libs/override_methods/check_metadata_consistency.js
@@ -1,6 +1,4 @@
-'use strict';
-
-load('jstests/libs/check_metadata_consistency_helpers.js'); // For MetadataConsistencyChecker
+import {MetadataConsistencyChecker} from "jstests/libs/check_metadata_consistency_helpers.js";
ShardingTest.prototype.checkMetadataConsistency = function() {
if (jsTest.options().skipCheckMetadataConsistency) {
diff --git a/jstests/libs/override_methods/check_routing_table_consistency.js b/jstests/libs/override_methods/check_routing_table_consistency.js
index c9a47714132e5..0a51e0cc640e4 100644
--- a/jstests/libs/override_methods/check_routing_table_consistency.js
+++ b/jstests/libs/override_methods/check_routing_table_consistency.js
@@ -1,6 +1,6 @@
-'use strict';
-
-load('jstests/libs/check_routing_table_consistency_helpers.js');
+import {
+ RoutingTableConsistencyChecker
+} from "jstests/libs/check_routing_table_consistency_helpers.js";
ShardingTest.prototype.checkRoutingTableConsistency = function() {
if (jsTest.options().skipCheckRoutingTableConsistency) {
diff --git a/jstests/libs/override_methods/crud_ops_as_bulkWrite.js b/jstests/libs/override_methods/crud_ops_as_bulkWrite.js
new file mode 100644
index 0000000000000..2b597788f3d82
--- /dev/null
+++ b/jstests/libs/override_methods/crud_ops_as_bulkWrite.js
@@ -0,0 +1,431 @@
+/**
+ * Overrides the runCommand method to convert specified CRUD ops into bulkWrite commands.
+ * Converts the bulkWrite responses into the original CRUD response.
+ */
+(function() {
+'use strict';
+
+let originalRunCommand = Mongo.prototype.runCommand;
+
+const commandsToBulkWriteOverride = new Set(["insert", "update", "delete", "findandmodify"]);
+
+const commandsToAlwaysFlushBulkWrite = new Set([
+ "aggregate",
+ "mapreduce",
+ "authenticate",
+ "logout",
+ "applyops",
+ "checkshardingindex",
+ "cleanuporphaned",
+ "cleanupreshardcollection",
+ "commitreshardcollection",
+ "movechunk",
+ "moveprimary",
+ "moverange",
+ "mergechunks",
+ "refinecollectionshardkey",
+ "split",
+ "splitvector",
+ "killallsessions",
+ "killallsessionsbypattern",
+ "dropconnections",
+ "filemd5",
+ "fsync",
+ "fsyncunlock",
+ "killop",
+ "setfeaturecompatibilityversion",
+ "shutdown",
+ "currentop",
+ "listdatabases",
+ "listcollections",
+ "committransaction",
+ "aborttransaction",
+ "preparetransaction",
+ "endsessions",
+ "killsessions"
+]);
+
+let numOpsPerResponse = [];
+let nsInfos = [];
+let bufferedOps = [];
+let letObj = null;
+let ordered = true;
+let bypassDocumentValidation = null;
+const maxBatchSize = 5;
+
+function resetBulkWriteBatch() {
+ numOpsPerResponse = [];
+ nsInfos = [];
+ bufferedOps = [];
+ letObj = null;
+ bypassDocumentValidation = null;
+ ordered = true;
+}
+
+function checkNamespaceStoredInBufferedOps(ns) {
+ return nsInfos.findIndex((element) => element.ns == ns) != -1;
+}
+
+function getLetFromCommand(cmdObj) {
+ if (cmdObj.hasOwnProperty("updates")) {
+ if (cmdObj.updates[0].hasOwnProperty("let")) {
+ return cmdObj.updates[0].let;
+ }
+ } else if (cmdObj.hasOwnProperty("deletes")) {
+ if (cmdObj.deletes[0].hasOwnProperty("let")) {
+            return cmdObj.deletes[0].let;
+ }
+ } else if (cmdObj.hasOwnProperty("let")) {
+ return cmdObj.let;
+ }
+ return null;
+}
+
+function opCompatibleWithCurrentBatch(cmdObj) {
+ if (numOpsPerResponse.length >= maxBatchSize) {
+ return false;
+ }
+
+ // If bypassDocumentValidation is not set we can continue. If the stored
+ // bypassDocumentValidation and the command bypassDocumentValidation are the same we can
+ // continue.
+ let cmdBypassDocumentValidation = cmdObj.hasOwnProperty("bypassDocumentValidation") &&
+ (cmdObj.bypassDocumentValidation == true);
+ if (bypassDocumentValidation != null &&
+ (cmdBypassDocumentValidation != bypassDocumentValidation)) {
+ return false;
+ }
+
+ const currentCmdLet = getLetFromCommand(cmdObj);
+
+    // If 'letObj' is null we can always continue. If 'letObj' is set but the incoming command
+    // has no 'let', we can also continue. If both are set they must be identical; otherwise the
+    // batch has to be flushed.
+    if (letObj != null && currentCmdLet != null && 0 !== bsonWoCompare(letObj, currentCmdLet)) {
+        return false;
+    }
+
+ // If saved ordered is false or the incoming ordered is false we must flush the batch.
+ let newOrdered = cmdObj.hasOwnProperty("ordered") ? cmdObj.ordered : true;
+ if (!ordered || !newOrdered) {
+ return false;
+ }
+
+ return true;
+}
+
+function flushCurrentBulkWriteBatch(options) {
+ if (bufferedOps.length == 0) {
+ return {};
+ }
+
+ // Should not be possible to reach if bypassDocumentValidation is not set.
+ assert(bypassDocumentValidation != null);
+
+ let bulkWriteCmd = {
+ "bulkWrite": 1,
+ "ops": bufferedOps,
+ "nsInfo": nsInfos,
+ "ordered": (ordered != null) ? ordered : true,
+ "bypassDocumentValidation": bypassDocumentValidation,
+ };
+
+ if (letObj != null) {
+ bulkWriteCmd["let"] = letObj;
+ }
+
+ let resp = {};
+ resp = originalRunCommand.apply(this, ["admin", bulkWriteCmd, options]);
+
+ let response = convertBulkWriteResponse(bulkWriteCmd, resp);
+ let finalResponse = response;
+
+ let expectedResponseLength = numOpsPerResponse.length;
+
+ // Retry on ordered:true failures by re-running subset of original bulkWrite command.
+ while (finalResponse.length != expectedResponseLength) {
+ // Need to figure out how many ops we need to subset out. Every entry in numOpsPerResponse
+ // represents a number of bulkWrite ops that correspond to an initial CRUD op. We need to
+ // make sure we split at a CRUD op boundary in the bulkWrite.
+ for (let i = 0; i < response.length; i++) {
+ let target = numOpsPerResponse.shift();
+ for (let j = 0; j < target; j++) {
+ bufferedOps.shift();
+ }
+ }
+ bulkWriteCmd.ops = bufferedOps;
+
+ resp = originalRunCommand.apply(this, ["admin", bulkWriteCmd, options]);
+ response = convertBulkWriteResponse(bulkWriteCmd, resp);
+ finalResponse = finalResponse.concat(response);
+ }
+
+ resetBulkWriteBatch();
+ return response;
+}
+
+function processFindAndModifyResponse(current, isRemove, resp) {
+ // findAndModify will only ever be a single op so we can freely replace
+ // the existing response.
+ resp = {ok: 1, value: null};
+ if (current.hasOwnProperty("value")) {
+ resp["value"] = current.value;
+ }
+ let lastErrorObject = {};
+ lastErrorObject["n"] = current.n;
+ if (current.hasOwnProperty("upserted")) {
+ lastErrorObject["upserted"] = current.upserted._id;
+ }
+ if (!isRemove) {
+ lastErrorObject["updatedExisting"] = current.nModified != 0;
+ }
+ resp["lastErrorObject"] = lastErrorObject;
+ return resp;
+}
+
+function initializeResponse(op) {
+ if (op.hasOwnProperty("update")) {
+ // Update always has nModified field set.
+ return {"n": 0, "nModified": 0, "ok": 1};
+ }
+ return {"n": 0, "ok": 1};
+}
+
+/**
+ * The purpose of this function is to take a server response from a bulkWrite command and to
+ * transform it to an array of responses for the corresponding CRUD commands that make up the
+ * bulkWrite.
+ *
+ * 'cmd' is the bulkWrite that was executed to generate the response
+ * 'orig' is the bulkWrite command response
+ */
+function convertBulkWriteResponse(cmd, bulkWriteResponse) {
+ let responses = [];
+ if (bulkWriteResponse.ok == 1) {
+ let cursorIdx = 0;
+ for (let numOps of numOpsPerResponse) {
+ let num = 0;
+ let resp = initializeResponse(cmd.ops[cursorIdx]);
+ while (num < numOps) {
+ if (cursorIdx >= bulkWriteResponse.cursor.firstBatch.length) {
+                    // This can happen if the bulkWrite encountered an error processing an op
+                    // with ordered:true set, meaning there are no more op responses left to
+                    // process. If the response being built has writeErrors set, we were in the
+                    // middle of an op, so push it before returning; otherwise we were starting
+                    // a new op response and should not push it.
+ if (resp.writeErrors) {
+ responses.push(resp);
+ }
+ return responses;
+ }
+
+ let current = bulkWriteResponse.cursor.firstBatch[cursorIdx];
+
+                // findAndModify responses have a different format. Detect findAndModify
+                // by the presence of the 'return' field in the op.
+ if (cmd.ops[cursorIdx].hasOwnProperty("return")) {
+ resp = processFindAndModifyResponse(
+ current, cmd.ops[cursorIdx].hasOwnProperty("delete"), resp);
+ } else {
+ if (current.ok == 0) {
+ // Normal write contains an error.
+ if (!resp.hasOwnProperty("writeErrors")) {
+ resp["writeErrors"] = [];
+ }
+ let writeError = {index: num, code: current.code, errmsg: current.errmsg};
+ resp["writeErrors"].push(writeError);
+ } else {
+ resp.n += current.n;
+ if (current.hasOwnProperty("nModified")) {
+ resp.nModified += current.nModified;
+ }
+ if (current.hasOwnProperty("upserted")) {
+ if (!resp.hasOwnProperty("upserted")) {
+ resp["upserted"] = [];
+ }
+ resp["upserted"].push(current.upserted);
+ }
+ }
+ }
+ cursorIdx += 1;
+ num += 1;
+ }
+ responses.push(resp);
+ }
+ }
+ return responses;
+}
+
+function getNsInfoIdx(nsInfoEntry) {
+ let idx = nsInfos.findIndex((element) => element.ns == nsInfoEntry);
+ if (idx == -1) {
+ idx = nsInfos.length;
+ nsInfos.push({ns: nsInfoEntry});
+ }
+ return idx;
+}
+
+function processInsertOp(nsInfoIdx, doc) {
+ return {insert: nsInfoIdx, document: doc};
+}
+
+function processUpdateOp(nsInfoIdx, cmdObj, update) {
+ let op = {
+ "update": nsInfoIdx,
+ "filter": update.q,
+ "updateMods": update.u,
+ "multi": update.multi ? update.multi : false,
+ "upsert": update.upsert ? update.upsert : false,
+ };
+
+ ["arrayFilters", "collation", "hint", "sampleId"].forEach(property => {
+ if (cmdObj.hasOwnProperty(property)) {
+ op[property] = cmdObj[property];
+ }
+ });
+
+ if (update.hasOwnProperty("let")) {
+ letObj = update.let;
+ }
+
+ return op;
+}
+
+function processDeleteOp(nsInfoIdx, cmdObj, deleteCmd) {
+ let op = {
+ "delete": nsInfoIdx,
+ "filter": deleteCmd.q,
+ "multi": deleteCmd.limit ? deleteCmd.limit == 0 : false
+ };
+
+ ["collation", "hint", "sampleId"].forEach(property => {
+ if (cmdObj.hasOwnProperty(property)) {
+ op[property] = cmdObj[property];
+ }
+ });
+
+ if (deleteCmd.hasOwnProperty("let")) {
+ letObj = deleteCmd.let;
+ }
+
+ return op;
+}
+
+function processFindAndModifyOp(nsInfoIdx, cmdObj) {
+ let op = {};
+
+ if (cmdObj.hasOwnProperty("remove") && (cmdObj.remove == true)) {
+ // is delete.
+ op["delete"] = nsInfoIdx;
+ op["return"] = true;
+ } else {
+ // is update.
+ op["update"] = nsInfoIdx;
+ op["updateMods"] = cmdObj.update;
+ op["return"] = cmdObj.new ? "post" : "pre";
+ if (cmdObj.hasOwnProperty("upsert")) {
+ op["upsert"] = cmdObj.upsert;
+ }
+ if (cmdObj.hasOwnProperty("arrayFilters")) {
+ op["arrayFilters"] = cmdObj.arrayFilters;
+ }
+ }
+
+ op["filter"] = cmdObj.query;
+
+ ["sort", "collation", "hint", "sampleId"].forEach(property => {
+ if (cmdObj.hasOwnProperty(property)) {
+ op[property] = cmdObj[property];
+ }
+ });
+
+ if (cmdObj.hasOwnProperty("fields")) {
+ op["returnFields"] = cmdObj.fields;
+ }
+
+ if (cmdObj.hasOwnProperty("let")) {
+ letObj = cmdObj.let;
+ }
+
+ return op;
+}
+
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ /**
+ * After SERVER-76660 this function will be used to direct a command to 2 different clusters.
+ * The main cluster will always execute originalRunCommand and the second will follow the
+ * current execution path below and their responses will be compared (if the bulkWrite path
+ * executed anything).
+ */
+
+ let cmdName = Object.keys(cmdObj)[0].toLowerCase();
+ if (commandsToBulkWriteOverride.has(cmdName)) {
+ let response = {};
+ if (!opCompatibleWithCurrentBatch(cmdObj)) {
+ response = flushCurrentBulkWriteBatch.apply(this, [options]);
+ }
+
+ // Set bypassDocumentValidation if necessary.
+ if (bypassDocumentValidation == null) {
+ bypassDocumentValidation = cmdObj.hasOwnProperty("bypassDocumentValidation")
+ ? cmdObj.bypassDocumentValidation
+ : false;
+ }
+
+ ordered = cmdObj.hasOwnProperty("ordered") ? cmdObj.ordered : true;
+
+ let nsInfoEntry = dbName + "." + cmdObj[cmdName];
+ let nsInfoIdx = getNsInfoIdx(nsInfoEntry);
+
+ let numOps = 0;
+
+ // Is insert
+ if (cmdName === "insert") {
+ assert(cmdObj.documents);
+ for (let doc of cmdObj.documents) {
+ bufferedOps.push(processInsertOp(nsInfoIdx, doc));
+ numOps += 1;
+ }
+ } else if (cmdName === "update") {
+ assert(cmdObj.updates);
+ for (let update of cmdObj.updates) {
+ bufferedOps.push(processUpdateOp(nsInfoIdx, cmdObj, update));
+ numOps += 1;
+ }
+ } else if (cmdName === "delete") {
+ assert(cmdObj.deletes);
+ for (let deleteCmd of cmdObj.deletes) {
+ bufferedOps.push(processDeleteOp(nsInfoIdx, cmdObj, deleteCmd));
+ numOps += 1;
+ }
+ } else if (cmdName === "findandmodify") {
+ bufferedOps.push(processFindAndModifyOp(nsInfoIdx, cmdObj));
+ numOps += 1;
+ } else {
+ throw new Error("Unrecognized command in bulkWrite override");
+ }
+
+ numOpsPerResponse.push(numOps);
+
+ return response;
+ } else if (commandsToAlwaysFlushBulkWrite.has(cmdName)) {
+ flushCurrentBulkWriteBatch.apply(this, [options]);
+ } else {
+ // Commands which are selectively allowed. If they are operating on a namespace which we
+ // have stored in our buffered ops then we will flush, if not then we allow the command to
+ // execute normally.
+ if (typeof cmdObj[cmdName] === 'string') {
+ // Should be the collection that the command is operating on, can make full namespace.
+ const ns = dbName + "." + cmdObj[cmdName];
+ if (checkNamespaceStoredInBufferedOps(ns)) {
+ flushCurrentBulkWriteBatch.apply(this, [options]);
+ }
+ }
+        // Otherwise it is an always-allowed command (like `isMaster`).
+ }
+
+ // Not a bulkWrite supported CRUD op, execute the command unmodified.
+ return originalRunCommand.apply(this, arguments);
+};
+})();
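
To make the op conversion concrete, here is a small illustrative example (not taken from the
override itself) of the bulkWrite command that the buffered ops produce for a two-document insert
against test.coll, using the same field names as processInsertOp()/getNsInfoIdx() above:

    // Original CRUD command issued by a test:
    const insertCmd = {insert: "coll", documents: [{_id: 1}, {_id: 2}], ordered: true};

    // What the override buffers and eventually flushes as a single bulkWrite:
    const bulkWriteCmd = {
        bulkWrite: 1,
        ops: [
            {insert: 0, document: {_id: 1}},   // 0 is an index into nsInfo
            {insert: 0, document: {_id: 2}},
        ],
        nsInfo: [{ns: "test.coll"}],
        ordered: true,
        bypassDocumentValidation: false,
    };
    printjson(bulkWriteCmd);

    // numOpsPerResponse records [2] for this command, so convertBulkWriteResponse()
    // folds the two per-op replies in cursor.firstBatch back into one {n: 2, ok: 1}
    // insert-style reply for the test.
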
diff --git a/jstests/libs/override_methods/golden_overrides.js b/jstests/libs/override_methods/golden_overrides.js
new file mode 100644
index 0000000000000..51f562b74ba11
--- /dev/null
+++ b/jstests/libs/override_methods/golden_overrides.js
@@ -0,0 +1,24 @@
+// Override print to output to both stdout and the golden file.
+// This affects everything that uses print: printjson, jsTestLog, etc.
+globalThis.print = (() => {
+ const original = globalThis.print;
+ return function print(...args) {
+ // Imitate GlobalInfo::Functions::print::call.
+ let str = args.map(a => a == null ? '[unknown type]' : a).join(' ');
+
+ // Make sure each print() call ends in a newline.
+ //
+ // From manual testing, it seems (print('a'), print('b')) behaves the same as
+ // (print('a\n'), print('b\n')); that behavior must be to ensure each print call appears on
+ // its own line for readability. In the context of golden testing, we want to match that
+ // behavior, and this also ensures the test output is a proper text file
+ // (newline-terminated).
+ if (str.slice(-1) !== '\n') {
+ str += '\n';
+ }
+
+ _writeGoldenData(str);
+
+ return original(...args);
+ };
+})();
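
A quick illustration of the newline behavior described above, assuming _writeGoldenData appends
verbatim to the golden output:

    // With this override loaded, both calls below contribute exactly one
    // newline-terminated line to the golden file ("a\n" then "b\n"),
    // matching what the shell writes to stdout.
    print("a");
    print("b\n");
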
diff --git a/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js b/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js
deleted file mode 100644
index b0fc5dc0de82e..0000000000000
--- a/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Loading this file overrides 'runCommand' with a function that modifies any $changeStream
- * aggregation to use $_generateV2ResumeTokens:false.
- */
-(function() {
-"use strict";
-
-load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
-
-// Override runCommand to set $_generateV2ResumeTokens on all $changeStreams.
-function runCommandV1Tokens(conn, dbName, cmdName, cmdObj, originalRunCommand, makeRunCommandArgs) {
- if (OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) {
- // Make a copy to avoid mutating the user's original command object.
- cmdObj = Object.assign({}, cmdObj, {$_generateV2ResumeTokens: false});
- }
- return originalRunCommand.apply(conn, makeRunCommandArgs(cmdObj));
-}
-
-// Always apply the override if a test spawns a parallel shell.
-OverrideHelpers.prependOverrideInParallelShell(
- "jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js");
-
-// Override the default runCommand with our custom version.
-OverrideHelpers.overrideRunCommand(runCommandV1Tokens);
-})();
\ No newline at end of file
diff --git a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
index 15d86e21abb2a..c9c67b25ab491 100644
--- a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js
@@ -65,4 +65,4 @@ DB.prototype.watch = function(pipeline, options) {
pipeline = Object.assign([], pipeline);
pipeline.unshift(ChangeStreamPassthroughHelpers.nsMatchFilter(this, 1));
return this.getMongo().watch(pipeline, options);
-};
\ No newline at end of file
+};
diff --git a/jstests/libs/override_methods/implicit_whole_db_changestreams.js b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
index a9cfd81505483..3b6204ef55568 100644
--- a/jstests/libs/override_methods/implicit_whole_db_changestreams.js
+++ b/jstests/libs/override_methods/implicit_whole_db_changestreams.js
@@ -166,4 +166,4 @@ DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) {
this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl);
return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]);
};
-}());
\ No newline at end of file
+}());
diff --git a/jstests/libs/override_methods/implicitly_configure_query_analyzer.js b/jstests/libs/override_methods/implicitly_configure_query_analyzer.js
index 8b81df69efa5c..f407b81b30dcc 100644
--- a/jstests/libs/override_methods/implicitly_configure_query_analyzer.js
+++ b/jstests/libs/override_methods/implicitly_configure_query_analyzer.js
@@ -12,7 +12,7 @@ load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelp
load("jstests/libs/override_methods/shard_collection_util.js");
const kShardProbability = 0.5;
-const kSampleRate = 1000; // per second.
+const kSamplesPerSecond = 1000; // per second.
// Save a reference to the original methods in the IIFE's scope.
// This scoping allows the original methods to be called by the overrides below.
@@ -37,8 +37,8 @@ function configureQueryAnalyzer({db, collName}) {
let result;
try {
- result =
- db.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: kSampleRate});
+ result = db.adminCommand(
+ {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: kSamplesPerSecond});
} catch (e) {
print(`Failed to configure query analyzer: ${tojsononeline({ns, e})}`);
if (!isNetworkError(e)) {
@@ -47,8 +47,11 @@ function configureQueryAnalyzer({db, collName}) {
}
if (!result.ok) {
if (result.code === ErrorCodes.CommandNotFound ||
- result.code === ErrorCodes.NamespaceNotFound) {
+ result.code === ErrorCodes.NamespaceNotFound ||
+ result.code === ErrorCodes.CommandNotSupportedOnView ||
+ result.code === ErrorCodes.IllegalOperation) {
print(`Failed to configure query analyzer: ${tojsononeline({ns, result})}`);
+ return;
}
assert.commandWorked(result);
}
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index cd765a33754c9..3f3baff00460b 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -32,30 +32,6 @@ DB.prototype.createCollection = function() {
return createCollResult;
}
- // We check feature flags on both primary and secondaries in case a step down happens after this
- // check.
- const featureResults = FixtureHelpers
- .runCommandOnAllShards({
- db: this.getSiblingDB('admin'),
- cmdObj: {getParameter: 1, featureFlagShardedTimeSeries: 1}
- })
- .map(result => assert.commandWorked(result));
-
- // The feature can only be used if the version associated with the feature is greater than or
- // equal to the FCV version. The getParameter does not consider the FCV value when checking for
- // whether the feature flag is enabled. So we run an additional getParameter command to fetch
- // the FCV state.
- const fcvResult =
- assert.commandWorked(FixtureHelpers.getPrimaryForNodeHostingDatabase(this).adminCommand(
- {getParameter: 1, featureCompatibilityVersion: 1}));
- const isTimeseriesShardingEnabled = featureResults.every(
- result => result.featureFlagShardedTimeSeries.value &&
- MongoRunner.compareBinVersions(fcvResult.featureCompatibilityVersion.version,
- result.featureFlagShardedTimeSeries.version) >= 0);
- if (!isTimeseriesShardingEnabled) {
- return createCollResult;
- }
-
const timeField = arguments[1]["timeseries"]["timeField"];
ShardingOverrideCommon.shardCollectionWithSpec({
db: this,
@@ -85,6 +61,11 @@ DB.prototype.getCollection = function() {
try {
TestData.doNotOverrideReadPreference = true;
collStats = this.runCommand({collStats: collection.getName()});
+ if (!collStats.ok && collStats.codeName == "CommandNotSupportedOnView") {
+            // If we get CommandNotSupportedOnView, the collection is actually a view and should
+            // be returned without attempting to shard it (sharding a view is not allowed).
+ return collection;
+ }
} finally {
TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal;
}
diff --git a/jstests/libs/override_methods/inject_dollar_tenant.js b/jstests/libs/override_methods/inject_dollar_tenant.js
index a336d8d20238d..66d318c826b32 100644
--- a/jstests/libs/override_methods/inject_dollar_tenant.js
+++ b/jstests/libs/override_methods/inject_dollar_tenant.js
@@ -23,7 +23,7 @@ function runCommandWithDollarTenant(
let res = originalRunCommand.apply(conn, makeRunCommandArgs(cmdToRun));
const prefixedDbName = kTenantId + "_" + dbName;
- assertExpectedDbNameInResponse(res, dbName, prefixedDbName);
+ assertExpectedDbNameInResponse(res, dbName, prefixedDbName, tojsononeline(res));
updateDbNamesInResponse(res, dbName, prefixedDbName);
return res;
}
diff --git a/jstests/libs/override_methods/inject_security_token.js b/jstests/libs/override_methods/inject_security_token.js
index 813384be6138a..4e7f8bf823bff 100644
--- a/jstests/libs/override_methods/inject_security_token.js
+++ b/jstests/libs/override_methods/inject_security_token.js
@@ -60,7 +60,6 @@ function prepareSecurityToken(conn) {
const kCmdsAllowedWithSecurityToken = new Set([
`abortTransaction`,
`aggregate`,
- `availableQueryOptions`,
`buildinfo`,
`buildinfo`,
`collMod`,
@@ -137,7 +136,7 @@ function runCommandWithResponseCheck(
let res = originalRunCommand.apply(conn, makeRunCommandArgs(cmdObj));
const prefixedDbName = kTenantId + "_" + dbName;
- assertExpectedDbNameInResponse(res, dbName, prefixedDbName);
+ assertExpectedDbNameInResponse(res, dbName, prefixedDbName, tojsononeline(res));
updateDbNamesInResponse(res, dbName, prefixedDbName);
return res;
}
diff --git a/jstests/libs/override_methods/inject_tenant_prefix.js b/jstests/libs/override_methods/inject_tenant_prefix.js
index 4e55802487726..9a91d6558474d 100644
--- a/jstests/libs/override_methods/inject_tenant_prefix.js
+++ b/jstests/libs/override_methods/inject_tenant_prefix.js
@@ -106,6 +106,11 @@ function prependTenantIdToDbNameIfApplicable(dbName) {
return dbName;
}
+ if (extractOriginalDbName(dbName) !== dbName) {
+ // dbName already has a tenantId prefix
+ return dbName;
+ }
+
let prefix;
// If running shard split passthroughs, then assign a database to a randomly selected tenant
if (usingMultipleTenants()) {
@@ -320,6 +325,19 @@ function extractTenantMigrationError(resObj, errorCode) {
}
}
}
+
+ // BulkWrite command has errors contained in a cursor response. The error will always be
+ // in the first batch of the cursor response since getMore is not allowed to run with
+ // tenant migration / shard merge suites.
+ if (resObj.cursor) {
+ if (resObj.cursor.firstBatch) {
+ for (let opRes of resObj.cursor.firstBatch) {
+ if (opRes.code && opRes.code == errorCode) {
+ return {code: opRes.code, errmsg: opRes.errmsg};
+ }
+ }
+ }
+ }
return null;
}
@@ -391,6 +409,15 @@ function modifyCmdObjForRetry(cmdObj, resObj) {
}
cmdObj.deletes = retryOps;
}
+
+ if (cmdObj.bulkWrite) {
+ let retryOps = [];
+ // For bulkWrite tenant migration errors always act as if they are executed as
+ // `ordered:true` meaning we will have to retry every op from the one that errored.
+ retryOps =
+ cmdObj.ops.slice(resObj.cursor.firstBatch[resObj.cursor.firstBatch.length - 1].idx);
+ cmdObj.ops = retryOps;
+ }
}
/**
@@ -533,6 +560,7 @@ function runCommandRetryOnTenantMigrationErrors(
let nModified = 0;
let upserted = [];
let nonRetryableWriteErrors = [];
+ let bulkWriteResponse = {};
const isRetryableWrite =
cmdObjWithTenantId.txnNumber && !cmdObjWithTenantId.hasOwnProperty("autocommit");
@@ -575,6 +603,31 @@ function runCommandRetryOnTenantMigrationErrors(
// Add/modify the shells's n, nModified, upserted, and writeErrors, unless this command is
// part of a retryable write.
if (!isRetryableWrite) {
+ // bulkWrite case.
+ if (cmdObjWithTenantId.bulkWrite) {
+                // On the first attempt, store the whole response.
+ if (numAttempts == 1) {
+ bulkWriteResponse = resObj;
+ } else {
+ // The last item from the previous response is guaranteed to be a
+ // tenant migration error. Remove it to append the retried response.
+ let newIdx = bulkWriteResponse.cursor.firstBatch.pop().idx;
+ // Iterate over new response and change the indexes to start with newIdx.
+ for (let opRes of resObj.cursor.firstBatch) {
+ opRes.idx = newIdx;
+ newIdx += 1;
+ }
+
+ // Add the new responses (with modified indexes) onto the original responses.
+ bulkWriteResponse.cursor.firstBatch =
+ bulkWriteResponse.cursor.firstBatch.concat(resObj.cursor.firstBatch);
+
+ // Add new numErrors onto old numErrors. Subtract one to account for the
+ // tenant migration error that was popped off.
+ bulkWriteResponse.numErrors += resObj.numErrors - 1;
+ }
+ }
+
if (resObj.n) {
n += resObj.n;
}
@@ -651,8 +704,14 @@ function runCommandRetryOnTenantMigrationErrors(
// Store the connection to the recipient so the next commands can be rerouted.
const donorConnection = getRoutingConnection(conn);
const migrationStateDoc = getOperationStateDocument(donorConnection);
- setRoutingConnection(
- conn, connect(migrationStateDoc.recipientConnectionString).getMongo());
+
+ const otherConn = connect(migrationStateDoc.recipientConnectionString).getMongo();
+ if (conn.getAutoEncryptionOptions() !== undefined) {
+ otherConn.setAutoEncryption(conn.getAutoEncryptionOptions());
+ otherConn.toggleAutoEncryption(conn.isAutoEncryptionEnabled());
+ }
+
+ setRoutingConnection(conn, otherConn);
// After getting a TenantMigrationCommitted error, wait for the python test fixture
// to do a dbhash check on the donor and recipient primaries before we retry the
@@ -707,6 +766,9 @@ function runCommandRetryOnTenantMigrationErrors(
if (nonRetryableWriteErrors.length > 0) {
resObj.writeErrors = nonRetryableWriteErrors;
}
+ if (cmdObjWithTenantId.bulkWrite) {
+ resObj = bulkWriteResponse;
+ }
}
return resObj;
}
@@ -733,12 +795,19 @@ Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
return resObj;
};
-// Override all base methods on the Mongo prototype to try to proxy the call to the underlying
+Mongo.prototype.getDbNameWithTenantPrefix = function(dbName) {
+ return prependTenantIdToDbNameIfApplicable(dbName);
+};
+
+// Override base methods on the Mongo prototype to try to proxy the call to the underlying
// internal routing connection, if one exists.
// NOTE: This list is derived from scripting/mozjs/mongo.cpp:62.
['auth',
+ 'cleanup',
'close',
'compact',
+ 'getAutoEncryptionOptions',
+ 'isAutoEncryptionEnabled',
'cursorHandleFromId',
'find',
'generateDataKey',
@@ -766,6 +835,22 @@ Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
};
});
+// The following methods are overridden so that the method applies to both
+// the proxy connection and the underlying internal routing connection, if one exists.
+['toggleAutoEncryption',
+ 'unsetAutoEncryption',
+ 'setAutoEncryption',
+].forEach(methodName => {
+ const $method = Mongo.prototype[methodName];
+ Mongo.prototype[methodName] = function() {
+ let rc = getRoutingConnection(this);
+ if (rc !== this) {
+ $method.apply(rc, arguments);
+ }
+ return $method.apply(this, arguments);
+ };
+});
+
OverrideHelpers.prependOverrideInParallelShell(
"jstests/libs/override_methods/inject_tenant_prefix.js");
}());
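
For reference, the double-dispatch pattern used for the auto-encryption setters above can be
written as a small standalone helper; this is a sketch only (forwardToBoth and getInner are
illustrative names, not part of this file):

    // Wrap obj[methodName] so it is applied to an underlying connection first
    // (when one exists and is distinct), then to the wrapper itself.
    function forwardToBoth(obj, getInner, methodName) {
        const original = obj[methodName];
        obj[methodName] = function(...args) {
            const inner = getInner(this);
            if (inner !== this) {
                original.apply(inner, args);
            }
            return original.apply(this, args);
        };
    }

This mirrors why setAutoEncryption/toggleAutoEncryption need to reach both connections: after a
tenant migration reroutes traffic to the recipient, the hidden routing connection must carry the
same encryption state as the connection the test holds.
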
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index cbb6ac98ab1f0..902277b25603e 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -243,10 +243,22 @@ function isRetryableMoveChunkResponse(res) {
res.code === ErrorCodes.CallbackCanceled;
}
-function isFailedToSatisfyPrimaryReadPreferenceError(msg) {
- const kReplicaSetMonitorError =
- /^Could not find host matching read preference.*mode: "primary"/;
- return msg.match(kReplicaSetMonitorError);
+function isFailedToSatisfyPrimaryReadPreferenceError(res) {
+ const kReplicaSetMonitorError = /Could not find host matching read preference.*mode:.*primary/;
+ if (res.hasOwnProperty("errmsg")) {
+ return res.errmsg.match(kReplicaSetMonitorError);
+ }
+ if (res.hasOwnProperty("message")) {
+ return res.message.match(kReplicaSetMonitorError);
+ }
+ if (res.hasOwnProperty("writeErrors")) {
+ for (let writeError of res.writeErrors) {
+ if (writeError.errmsg.match(kReplicaSetMonitorError)) {
+ return true;
+ }
+ }
+ }
+ return false;
}
function hasError(res) {
@@ -797,6 +809,17 @@ function shouldRetryWithNetworkErrorOverride(
res, cmdName, startTime, logError, shouldOverrideAcceptableError = true) {
assert(configuredForNetworkRetry());
+ if (isFailedToSatisfyPrimaryReadPreferenceError(res) &&
+ Date.now() - startTime < 5 * 60 * 1000) {
+ // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
+ // primary of the replica set. It is possible for the step up attempt of another
+ // node in the replica set to take longer than 15 seconds so we allow retrying
+ // for up to 5 minutes.
+ logError("Failed to find primary when attempting to run command," +
+ " will retry for another 15 seconds");
+ return kContinue;
+ }
+
if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) {
if ((cmdName === "findandmodify" || cmdName === "findAndModify") &&
isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) {
@@ -850,18 +873,6 @@ function shouldRetryWithNetworkErrorOverride(
return kContinue;
}
- if (res.hasOwnProperty("errmsg") &&
- isFailedToSatisfyPrimaryReadPreferenceError(res.errmsg) &&
- Date.now() - startTime < 5 * 60 * 1000) {
- // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
- // primary of the replica set. It is possible for the step up attempt of another
- // node in the replica set to take longer than 15 seconds so we allow retrying
- // for up to 5 minutes.
- logError("Failed to find primary when attempting to run command," +
- " will retry for another 15 seconds");
- return kContinue;
- }
-
// Some sharding commands return raw responses from all contacted shards and there won't
// be a top level code if shards returned more than one error code, in which case retry
// if any error is retryable.
@@ -963,7 +974,7 @@ function shouldRetryWithNetworkExceptionOverride(
if (numNetworkErrorRetries === 0) {
logError("No retries, throwing");
throw e;
- } else if (isFailedToSatisfyPrimaryReadPreferenceError(e.message) &&
+ } else if (isFailedToSatisfyPrimaryReadPreferenceError(e) &&
Date.now() - startTime < 5 * 60 * 1000) {
// ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the
// primary of the replica set. It is possible for the step up attempt of another
diff --git a/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js b/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js
index c51aa8093784d..34721dd954576 100644
--- a/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js
+++ b/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js
@@ -94,4 +94,4 @@ ShardingTest = function(params) {
// Extend the new 'ShardingTest' fixture with the properties of the original one.
Object.extend(ShardingTest, originalShardingTest);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js b/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js
new file mode 100644
index 0000000000000..c0301a73d7992
--- /dev/null
+++ b/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js
@@ -0,0 +1,79 @@
+/**
+ * Overrides Mongo.prototype.runCommand to retry interrupted create index and create database
+ * commands. Was modeled partly on retry_on_killed_session.js.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/override_methods/override_helpers.js");
+
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+
+Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) {
+ return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments);
+};
+
+const kCreateIndexCmdNames = new Set(["createIndexes", "createIndex"]);
+const kMaxRetryCount = 100;
+
+// Returns if the command should retry on IndexBuildAborted errors.
+function shouldRetryIndexCreateCmd(cmdObj) {
+ if (cmdObj.hasOwnProperty("autocommit")) {
+ // Transactions are retried at a higher level.
+ return false;
+ }
+
+ const cmdName = Object.keys(cmdObj)[0];
+ if (kCreateIndexCmdNames.has(cmdName)) {
+ return true;
+ }
+
+ return false;
+}
+
+// Returns if the code is one that could come from an index build being aborted.
+function hasIndexBuildAbortedError(res) {
+ return res.code === ErrorCodes.IndexBuildAborted;
+}
+
+function hasInterruptedDbCreationError(errOrRes) {
+ return errOrRes.code === ErrorCodes.Interrupted &&
+ ((errOrRes.errmsg.indexOf("Database") === 0 &&
+ errOrRes.errmsg.indexOf("could not be created") > 0) ||
+ errOrRes.errmsg.indexOf("Failed to read local metadata.") === 0 ||
+ errOrRes.errmsg.indexOf("split failed") === 0 ||
+ errOrRes.errmsg.indexOf(
+ "Failed to read highest version persisted chunk for collection") === 0);
+}
+
+/* Runs the client command with the ability to retry on an IndexBuildAborted error
+ * or an interrupted database-creation error.
+ */
+function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) {
+ let retryCount = 0;
+ while (true) {
+ const res = clientFunction.apply(mongo, clientFunctionArguments);
+
+ if (++retryCount >= kMaxRetryCount) {
+ return res;
+ } else if (hasIndexBuildAbortedError(res)) {
+ if (shouldRetryIndexCreateCmd(cmdObj)) {
+ print("-=-=-=- Retrying " + tojsononeline(cmdObj) +
+ " after IndexBuildAborted error response: " + tojsononeline(res));
+ continue;
+ } else {
+ return res;
+ }
+ } else if (hasInterruptedDbCreationError(res)) {
+ print("-=-=-=- Retrying " + tojsononeline(cmdObj) +
+ " after interrupted db creation response: " + tojsononeline(res));
+ continue;
+ }
+
+ return res;
+ }
+}
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/retry_aborted_db_and_index_creation.js");
+})();
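
Usage note: with this override loaded by a suite, a test whose index build races with background
hooks can assert success directly and rely on the bounded retry loop above (up to kMaxRetryCount
attempts), e.g.:

    // Retried transparently if the build is aborted or the implicit database
    // creation is interrupted by a concurrent operation.
    assert.commandWorked(db.coll.createIndex({a: 1}));
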
diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js
similarity index 100%
rename from jstests/libs/override_methods/sharding_continuous_config_stepdown.js
rename to jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js
diff --git a/jstests/libs/override_methods/tenant_aware_response_checker.js b/jstests/libs/override_methods/tenant_aware_response_checker.js
index 9c9e9a0ef7745..7b53504ab68d5 100644
--- a/jstests/libs/override_methods/tenant_aware_response_checker.js
+++ b/jstests/libs/override_methods/tenant_aware_response_checker.js
@@ -12,23 +12,25 @@ function wordInString(str, word) {
return regexp.test(str);
}
-function checkExpectedDbNameInString(str, dbName, prefixedDbName) {
+function checkExpectedDbNameInString(str, dbName, prefixedDbName, originalRes) {
// System db names (admin, local and config) should never be tenant prefixed.
if (dbName == "admin" || dbName == "local" || dbName == "config") {
assert.eq(false,
wordInString(str, prefixedDbName),
- `Response db name "${str}" does not match sent db name "${dbName}"`);
+ `Response db name "${str}" does not match sent db name "${
+ dbName}". The response is "${originalRes}"`);
return;
}
// Currently, we do not expect prefixed db name in db name field as we only test with
// "featureFlagRequireTenantID: true".
- // TODO SERVER-70740: expect prefixed db name if "expectPrefix" option in request is true.
+ // TODO SERVER-78300: expect prefixed db name if "expectPrefix" option in request is true.
assert.eq(false,
wordInString(str, prefixedDbName),
- `Response db name "${str}" does not match sent db name "${dbName}"`);
+ `Response db name "${str}" does not match sent db name "${
+ dbName}". The response is "${originalRes}"`);
}
-function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) {
+function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName, originalRes) {
// The db name in error message should always include tenant prefixed db name regardless how the
// tenantId was received in the request.
@@ -38,17 +40,8 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) {
return;
}
- // TODO SERVER-74486: We will check collection ns string in future.
- if (errMsg.includes(dbName + ".")) {
- // Do not check ns until we change error mssage to include tenant in ns.
- return;
- }
-
- // System db names (admin, local and config) should never be tenant prefixed.
+    // Skip checking system db names (admin, local and config), which may or may not be
+    // tenant prefixed.
if (dbName == "admin" || dbName == "local" || dbName == "config") {
- assert.eq(false,
- wordInString(errMsg, prefixedDbName),
- `Response db name "${errMsg}" does not match sent db name "${dbName}"`);
return;
}
@@ -61,7 +54,7 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) {
assert.eq(true,
errMsg.includes(prefixedDbName),
`The db name in the errmsg does not contain expected tenant prefixed db name "${
- prefixedDbName}", error msg: ${errMsg}`);
+ prefixedDbName}". The response is "${originalRes}"`);
}
/**
@@ -70,8 +63,9 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) {
* @param {*} requestDbName the original db name requested by jstest.
* @param {*} prefixedDbName the tenant prefixed db name expected by inject_dollar_tenant.js and
 * inject_security_token.js.
+ * @param {*} originalResForLogging the original response for logging.
*/
-function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName) {
+function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName, originalResForLogging) {
if (requestDbName.length === 0) {
return;
}
@@ -80,21 +74,25 @@ function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName) {
let v = res[k];
if (typeof v === "string") {
if (k === "dbName" || k == "db" || k == "dropped") {
- checkExpectedDbNameInString(v, requestDbName, prefixedDbName);
+ checkExpectedDbNameInString(
+ v, requestDbName, prefixedDbName, originalResForLogging);
} else if (k === "namespace" || k === "ns") {
- checkExpectedDbNameInString(getDbName(v), requestDbName, prefixedDbName);
+ checkExpectedDbNameInString(
+ getDbName(v), requestDbName, prefixedDbName, originalResForLogging);
} else if (k == "name") {
- checkExpectedDbNameInString(v, requestDbName, prefixedDbName);
+ checkExpectedDbNameInString(
+ v, requestDbName, prefixedDbName, originalResForLogging);
} else if (k === "errmsg") {
- checkExpectedDbInErrorMsg(v, requestDbName, prefixedDbName);
+ checkExpectedDbInErrorMsg(v, requestDbName, prefixedDbName, originalResForLogging);
}
} else if (Array.isArray(v)) {
v.forEach((item) => {
if (typeof item === "object" && item !== null)
- assertExpectedDbNameInResponse(item, requestDbName, prefixedDbName);
+ assertExpectedDbNameInResponse(
+ item, requestDbName, prefixedDbName, originalResForLogging);
});
} else if (typeof v === "object" && v !== null && Object.keys(v).length > 0) {
- assertExpectedDbNameInResponse(v, requestDbName, prefixedDbName);
+ assertExpectedDbNameInResponse(v, requestDbName, prefixedDbName, originalResForLogging);
}
}
}
diff --git a/jstests/libs/password_protected.pem b/jstests/libs/password_protected.pem
index 1a30869523723..1a8a08d2cd236 100644
--- a/jstests/libs/password_protected.pem
+++ b/jstests/libs/password_protected.pem
@@ -4,58 +4,58 @@
# Server certificate using an encrypted private key.
-----BEGIN CERTIFICATE-----
-MIIEWDCCA0CgAwIBAgIEGQdcfTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEWDCCA0CgAwIBAgIEIO4OPzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBsMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjBsMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG
-c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4Y2yTK4QaCf/
-zGj35W1sCoGoBYtyt7iJthVd86WQEQXK9Efra8phbxlpqEBaf/EvB/hwN8073eCL
-NQlhm5kupVfY1hIezlJzrAJp1u1A/m6znkqTs4cHp5+Ln/MkTq3CYTUwy4Z1mM0T
-sLmTKBlcdVqC3gvux/iL6RjRPmbPXuHgN3ORagk11oSKEsIy8ShGMFZMdtT1pnqr
-xESx3JGIRr+CPoctsDsiOkJmHauqJDqVliF5pjdr2N0T//JOtKzVCY0hf+A0pJ3z
-N0zIiP42XANZCo2aikwOmnnSRzIUJT52tp/FIRzFbuenRF5sJD++zUp15H/30FKA
-INAZiwWO0QIDAQABo4H5MIH2MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1Ud
-JQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSeFd//+k2VDH02TPQzJmfOqMv4sjCB
+c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4xt3hUpO97g5
+pCBkLjz0pu1XyO7dtaV8BqtjDDWFCYKByMJdYnFNPF/DaGmShpADfbsdQYV8IiWx
+Ly2fVSZH/rcVNvvdMqiDGxhlci14jLTbt4hW3f02Lct6CLy68hx+9Eg3JoyCo2br
+VYbBMhRjBDt7jrYa/WiBVDkeaPfwOxt/jNyZiITWQUMeWfpGjmP5aRp5vgJzDukq
+5kuTaIy/z1K0J5IyHu3sSnC29kLWWdaavdpF29iwf78dkuRsh5h4M/UnUxufyqgf
+u31WZmTCiZcitAi+vOcRtWQiBWVg1iVt9ohATjhvmc7647T1VoHGggrZdYKvfF8Y
+5bPS2LSlPwIDAQABo4H5MIH2MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1Ud
+JQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBRvMjhuWPxwzNAJYB7vdas/PF3HVjCB
iwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlv
cmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzAN
-BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBHvUrJMwGgYD
-VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBOLNRO
-G1L6FxiqreB3MBafbW7Q2RsZdIQRqZilec5Kxf+znDH2C5NiRS6vR+5e7hDSPy6m
-F2/CXbZep9Jgu/6SYFSB7x5HvHCSn41zc12UINc5N2ol8e5aHvimoWXvJiT71Wz2
-iRUNUl/DRuUnq/UglCz7iM0NDB3Ti5SgsTka6OmYyAT72sh70pqUq8X+htaSj0/B
-+NIrNjRYMCBKhVo2SsRXh7XvsyUraGYoVH3y9mEVLJkyuBf6W1odXLlokzB84bJT
-QBXv9IUdDQYBK3J7Q7iVOBW0Vkfp11WfVaAhrcAC/udL5LoxeJep/hJVNNuA0qqG
-BkrzIl/Evn3SZ++/
+BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBEreWhowGgYD
+VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAq0bTR
+PEZPpKq+VCBT+06+uoe3l93zs/h72n3Yn6BdO2cCcskqi0yB3NhazLIohgZ1rGyY
+o9W5Z73Y6BCk8Rd07c6vTc0Jgra1cdr/1X6RZpENMVfcvi0w3Ej5dlK+znU68oDj
+eDh/GorF/I9JvPvYpArIGdxSNR0isDsgI9I+4/YMg7WTCOTrMSLq9DmSK51uxTWe
+GY8AUuOPxZPeWwWWZsO9sRIXCbzAPOSF8TiqjWiKkYF3UO0dLIId9YRty7EPp2ea
+SDmfYT1CSihDaUzbDmV+39UMw8Pw1mn45kN4CxXPF+3NDx7HqZ4yzna2DiJw9J2w
+YEbO5Gg3cM6vxgC4
-----END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-Proc-Type: 4,ENCRYPTED
-DEK-Info: AES-256-CBC,53A39D8D01CE003985BD7AB8429591B1
-
-7K2t5Np+XfDNlKHDkU29xpNC+3xmE2FJJAmD9pMwpeoiPB/6snYnNXnDJyNW60oA
-hriOMMoNU/vGUvn1aAyIlxw4MsjO0PkHfKipI3m+NQC2kZNTTBOBA/dzLavo64zB
-5IDDlNzrvHlxP3P/jAF2EdQ91206YtYw/x1Ix4y2F4hITktRsYEx09gH9hKoo8uJ
-f2ekx28ANIOjuWBc3zI1SDpfGPqr5ruZZsB6Ucj6+rYpSboC1ZAfdCdjbAhuAkp0
-1QiGKhfyyZdo44ojjCzZpPwGsOaK+f1UYp8p9WqXJ+RfwP69h9yJNVPsWi1axcTd
-nRM2ppxmeRuyrPo/HeD2T19gDkzGdlJA35dN2T4p43k4xfUGKWjij/5ndFuupfVs
-o4o0rqhzvBSupsxnka1TfXNHrbdJpiWoH0M5G0YhaJfX2+tnWi301jQ2kE8fMXGB
-aisE0BXNp0oFk1SYgiet5H3Rj0eWabvUHszjvzEcn8EK/G4llHvijGVUeYIajFfZ
-4Yyx4chBAMB8fto/LcDbGzRe9cHyCz1bq7/IBqnYAUMNwsC5Z4OfMRZK274kQX1l
-V8YZW4O5jpAyI9BhbwrQE/Lx5eX4JKd62j3OTwewL/aitfXjF5zgiv4LjTLB4XcP
-z3MAdjCQbwnbd8M6izVy36OMO1wR1PO7Pad39S+BOtdtodB78/ZR3j2bgsRqiHAx
-l4kgbKgnKsceHjbxjrb7qelqalwEhlTIwVciY7Dooh4qT73uPRJwWbCXARpsh4mg
-1FHCRhyyPumIXa/EENssAghr/xI/gwx6ZJnun4WMK/Um0dHv8WUXQcN+Fx2GT9BO
-UgObmASGvfG50q8ZHp+qwIOy/EHYA7fnWYNr1DZnCZPse3AvYwXLyG4AAdFjQdSe
-2Q9v+Pw39EYNl7h97gnm10hL79bJ0eFggQGGXrqNd0wKBeewhxSigEEZ70K/H7ga
-URoRxDl8pe+NytGRPrWIUZbYPxaFCxc+kpnB9V96u+RuTNQORrOdiKKjzFUve2qh
-JiNASm6JHsCKsVfPEmIOSnzwJPpAcEx8SYEyhyrPMHv51UjPtUaStByp+DsWiKeb
-yzI20MxV0WishdjgfWN6YU4eRY0CXUPxly+MmhNHdscOSzUtnpaAoCpcEs/zsope
-xNUtBz8lsyb1x6ooBibnqrP6E5TA78hiAK+9/UsEpdPBODmudRA0Bc3X9y0dyEe/
-76wzVnZnE3Ho63XYEyRLnHGOQOxjzgQr9ssRh2hjvk9/wEuJ746WmHAxf66DIDRQ
-yQP5O8zYYMREPj4s6WPK8zWuYCzMwMmtHVc+/kYUKopmKRr4jzKZucuDuTW5FnFx
-Zi9RqgQHrkafTLOsg2mFW85IcewTlJ7lLzHP8wlnbxtX5NdzDArpTuX5gVDvXSQx
-LNdUtMrBFvpfszxzHMMEg2qo5+T/2UGS8sd5Z0mpB5jLFzaxAFq98KAU51XVpvBn
-Q3R5lXS6Q3myHCYg1NHDCtfss1fiwpCIVn9EI2u9/IhM9Tpnp+l1eFafv1Doi5BL
-LRwBtXxqMqUnvdvuS0LBLAJaJlH5jrF28vNrKP4SRK3j9zg6p+Yvgm1EvJC01iiY
------END RSA PRIVATE KEY-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFLTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQIjrhb/JO1RvYCAggA
+MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBDtYUB4pr1Zg+bjhQkJuSySBIIE
+0HWptM5jRrPfdiEjPEx1qPTIh3zalOcVjHIX0JI6WAuwJlNFJ+jewzEQ7lZWPVXv
+XD/M/h9FD9whe0i9F35h+rrHChbhmfCosXItPbgj22wJHvOdwNw8eoardT2p3BXA
+TZBNs9LdOPlIbnpSGe+mBU7z3bh3LBpJkdni2gXi6WN+2nyUv5V0oWui9a3sqRhS
+fQyVeg7zT+w3/gFIZ2MaMaSAAZS3C+u9GQCqFQH7ut8U2EUnQhoWvbGc38amCAbx
+ovC6uS2IwDXMFdWrPmusEWZBa/GmndBa7OXO+KlzUxIM8iw6ICRTmXwT9e5dS4ED
+3ONDPzBZxXrsam/tHx0UaTLMo9sDLMoW3PPVp/pYzIyltQMi3QsJ9+oEB8Xel8KB
+r+UQAkx+XCgZFFOwGKt76w8OanTbR6krWxbdmEeECo2dlDjQjygGE/jfnQ4Z+4cC
+ZvSNj8rODbSbHevkw0Fx/bIrtjI6aIP63cPsV1Y+bk930IasiprOKFsyHarcWMXw
+2YQ5IMfqcHjfS5ZRmjmruZhySLZ8TBEBlQSoPEUNpAlcXBkAdhls9fVSa7d1sPcP
+2PKLUgA4I/IjBRM9z4Y77yYaeZ8cKDFh8IgGgXlFMwjNUujJRlyOyYGx4BClQXvu
+Q7sH4V3PC/JXTeczRUe0+hRHLD2M9krs6ROdDBpvkbxhba3NifhH02eA3sfInazg
+mXAbUvjnULorSLS1riScZj/ExLgbIOGiZQ/H6O0v3LZt9EsOlC1xgcOLLWJpCo6A
+qypjyuCJGenG1gFIf7CLxU1nhlAC6uRcsvwvY4vRw5fO1u8fuomxWyOOulMFeOBW
+my+rNEtTmhBlPiST11g0BHuW+zyGQxgzr88x9tWl+OdIoRGazhWRTguIVOV0ubt1
+dAhF4jY3uNl9kvHMB3viiRvuuH9lf17MTJtdjboIA81uvqYkx7sN3773QzCwcYGP
+DdFK7aHB3MwKF9e4z7FFQTIqpRCjyZgih3HhUJub2cKer1qPYN5N37uSTl8Tk7sB
+x6xH45lVnQ8bIzzwQd0SBEKi7zIZzOyqstSp7IsOOgsxVZPyDq8Wuqg6DNcOP+A6
+sdaNk5gHVuRsJXDLOoQUjmUp5IOW0ePPFpBwNIbHMAI9/Jy7W7l4A1/9S8/LhFFw
+4JI3qaBlAocNR5CnmmS2kOFmQLOR5sdOq4C8+D/1ICXJqyj7MpTfBbUb6Jm1/XSm
+9mibccGCgrj2VmpcoYw8O79GJsYVQJofofwhH8r4J3ZIYSA/LkeKtDg3y/gwWwkL
+dJYl7ZpF3uoD6d+euwzLJntICLrXFkLvTx15Gy1+qFe4l749/XnEQcav0v1NnYnN
+MhlcdOaACxoRA0aq2G7Ds7sFqduX7qqM7/NipP2ER6UhYwI1BFjtT3r+9SMKoJi9
+XHxoiOwdz+qd57NNqvMT9iHJ/YOBPilLFAMgzbJFrDbJguI7OjwTnDF0CCSSTi2r
+g3JQszXsZK+uRHi1FfxdBiPKFx2r590262GJ6/goX0thwG0smKkMG4eZ2tUaSiL/
+RLClfI2NfvBat/ThKWcipAc9vZulJzzjGKmmrndU3DnQujh/RAnGCZVlK/ysgQNG
+JOZg2p+qtfXODXQj84bcTtdXQ1sSCtorgHr/qKsuT97v
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/jstests/libs/password_protected.pem.digest.sha1 b/jstests/libs/password_protected.pem.digest.sha1
index 98c4138fb9974..cc9e4167ae047 100644
--- a/jstests/libs/password_protected.pem.digest.sha1
+++ b/jstests/libs/password_protected.pem.digest.sha1
@@ -1 +1 @@
-470CE33FFA8FCA3CC61F3FD2266B6E3DAB4B77D2
\ No newline at end of file
+D42847522F2EB56669C8F4C1E88FFB177026143E
\ No newline at end of file
diff --git a/jstests/libs/password_protected.pem.digest.sha256 b/jstests/libs/password_protected.pem.digest.sha256
index 02d1a0b702be6..6bddec7b349ab 100644
--- a/jstests/libs/password_protected.pem.digest.sha256
+++ b/jstests/libs/password_protected.pem.digest.sha256
@@ -1 +1 @@
-7A24D74A8CC11A256EA0EE08EBBFA60D0274094F7A4B36EB7B8B0061AB58F375
\ No newline at end of file
+CB2C415800C604B9C195D8D1F2A714732720867A2736A8DC6ECF8340EA026860
\ No newline at end of file
diff --git a/jstests/libs/query_stats_utils.js b/jstests/libs/query_stats_utils.js
new file mode 100644
index 0000000000000..1401830ed2d3f
--- /dev/null
+++ b/jstests/libs/query_stats_utils.js
@@ -0,0 +1,208 @@
+const kShellApplicationName = "MongoDB Shell";
+const kDefaultQueryStatsHmacKey = BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE=");
+
+/**
+ * Utility for checking that the aggregated queryStats metrics are logical (follows sum >= max >=
+ * min, and sum = max = min if only one execution).
+ */
+function verifyMetrics(batch) {
+ batch.forEach(element => {
+ if (element.metrics.execCount === 1) {
+ for (const [metricName, summaryValues] of Object.entries(element.metrics)) {
+ // Skip over fields that aren't aggregated metrics with sum/min/max (execCount,
+ // lastExecutionMicros).
+ if (summaryValues.sum === undefined) {
+ continue;
+ }
+ const debugInfo = {[metricName]: summaryValues};
+ // If there has only been one execution, all metrics should have min, max, and sum
+ // equal to each other.
+ assert.eq(summaryValues.sum, summaryValues.min, debugInfo);
+ assert.eq(summaryValues.sum, summaryValues.max, debugInfo);
+ assert.eq(summaryValues.min, summaryValues.max, debugInfo);
+ }
+ } else {
+ for (const [metricName, summaryValues] of Object.entries(element.metrics)) {
+ // Skip over fields that aren't aggregated metrics with sum/min/max (execCount,
+ // lastExecutionMicros).
+ if (summaryValues.sum === undefined) {
+ continue;
+ }
+ const debugInfo = {[metricName]: summaryValues};
+ assert.gte(summaryValues.sum, summaryValues.min, debugInfo);
+ assert.gte(summaryValues.sum, summaryValues.max, debugInfo);
+ assert.lte(summaryValues.min, summaryValues.max, debugInfo);
+ }
+ }
+ });
+}
+
+/**
+ *
+ * Collect query stats from a given collection. Only include query shapes generated by the shell
+ * that is running tests.
+ *
+ * @param conn - connection to database
+ * @param {object} options {
+ * {String} collName - name of collection
+ *     {object} extraMatch - optional predicate merged into the pipeline's $match stage
+ * }
+ */
+function getQueryStats(conn, options = {
+ collName: ""
+}) {
+ let match = {"key.client.application.name": kShellApplicationName, ...options.extraMatch};
+    if (options.collName) {
+ match["key.queryShape.cmdNs.coll"] = options.collName;
+ }
+ const result = conn.adminCommand({
+ aggregate: 1,
+ pipeline: [
+ {$queryStats: {}},
+ // Sort on query stats key so entries are in a deterministic order.
+ {$sort: {key: 1}},
+ {$match: match}
+ ],
+ cursor: {}
+ });
+ assert.commandWorked(result);
+ return result.cursor.firstBatch;
+}
+
+/**
+ * @param {object} conn - connection to database
+ * @param {object} options {
+ * {BinData} hmacKey
+ * {String} collName - name of collection
+ * {boolean} transformIdentifiers - whether to include transform identifiers
+ * }
+ */
+function getQueryStatsFindCmd(conn, options = {
+ collName: "",
+ transformIdentifiers: false,
+ hmacKey: kDefaultQueryStatsHmacKey
+}) {
+ let matchExpr = {
+ "key.queryShape.command": "find",
+ "key.client.application.name": kShellApplicationName
+ };
+ if (options.collName) {
+ matchExpr["key.queryShape.cmdNs.coll"] = options.collName;
+ }
+ // Filter out agg queries, including $queryStats.
+ var pipeline;
+ if (options.transformIdentifiers) {
+ pipeline = [
+ {
+ $queryStats: {
+ transformIdentifiers: {
+ algorithm: "hmac-sha-256",
+ hmacKey: options.hmacKey ? options.hmacKey : kDefaultQueryStatsHmacKey
+ }
+ }
+ },
+ {$match: matchExpr},
+ // Sort on queryStats key so entries are in a deterministic order.
+ {$sort: {key: 1}},
+ ];
+ } else {
+ pipeline = [
+ {$queryStats: {}},
+ {$match: matchExpr},
+ // Sort on queryStats key so entries are in a deterministic order.
+ {$sort: {key: 1}},
+ ];
+ }
+ const result = conn.adminCommand({aggregate: 1, pipeline: pipeline, cursor: {}});
+ assert.commandWorked(result);
+ return result.cursor.firstBatch;
+}
+
+/**
+ * Collects query stats from any aggregate command query shapes (with $queryStats requests filtered
+ * out) that were generated by the shell that is running tests.
+ *
+ * @param {object} conn - connection to database
+ * @param {object} options {
+ * {BinData} hmacKey
+ * {boolean} transformIdentifiers - whether to include transform identifiers
+ * }
+ */
+function getQueryStatsAggCmd(conn, options = {
+ transformIdentifiers: false,
+ hmacKey: kDefaultQueryStatsHmacKey
+}) {
+ var pipeline;
+ if (options.transformIdentifiers) {
+ pipeline = [
+ {
+ $queryStats: {
+ transformIdentifiers: {
+ algorithm: "hmac-sha-256",
+ hmacKey: options.hmacKey ? options.hmacKey : kDefaultQueryStatsHmacKey
+ }
+ }
+ },
+ // Filter out find queries and $queryStats aggregations.
+ {
+ $match: {
+ "key.queryShape.command": "aggregate",
+ "key.queryShape.pipeline.0.$queryStats": {$exists: false},
+ "key.client.application.name": kShellApplicationName
+ }
+ },
+ // Sort on key so entries are in a deterministic order.
+ {$sort: {key: 1}},
+ ];
+ } else {
+ pipeline = [
+ {$queryStats: {}},
+ // Filter out find queries and $queryStats aggregations.
+ {
+ $match: {
+ "key.queryShape.command": "aggregate",
+ "key.queryShape.pipeline.0.$queryStats": {$exists: false},
+ "key.client.application.name": kShellApplicationName
+ }
+ },
+ // Sort on key so entries are in a deterministic order.
+ {$sort: {key: 1}},
+ ];
+ }
+
+ const result = conn.adminCommand({aggregate: 1, pipeline: pipeline, cursor: {}});
+ assert.commandWorked(result);
+
+ return result.cursor.firstBatch;
+}
+
+function confirmAllExpectedFieldsPresent(expectedKey, resultingKey) {
+ let fieldsCounter = 0;
+ for (const field in resultingKey) {
+ fieldsCounter++;
+ if (field === "client") {
+ // client meta data is environment/machine dependent, so do not
+ // assert on fields or specific fields other than the application name.
+ assert.eq(resultingKey.client.application.name, kShellApplicationName);
+ continue;
+ }
+ if (!expectedKey.hasOwnProperty(field)) {
+ print("Field present in actual object but missing from expected: " + field);
+ print("Expected " + tojson(expectedKey));
+ print("Actual " + tojson(resultingKey));
+ }
+ assert(expectedKey.hasOwnProperty(field));
+ assert.eq(expectedKey[field], resultingKey[field]);
+ }
+ // Make sure the resulting key isn't missing any fields.
+ assert.eq(fieldsCounter, Object.keys(expectedKey).length, resultingKey);
+}
+
+function asFieldPath(str) {
+ return "$" + str;
+}
+
+function asVarRef(str) {
+ return "$$" + str;
+}
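
A short usage sketch of these helpers (namespace and query are illustrative, and this assumes
query stats collection is enabled on the server under test):

    const testDB = db.getSiblingDB("test");
    const coll = testDB.query_stats_demo;        // illustrative collection name
    assert.commandWorked(coll.insert({x: 1}));
    coll.find({x: 1}).toArray();

    // Fetch the aggregated entry for that find shape and sanity-check its metrics.
    const entries = getQueryStatsFindCmd(testDB.getMongo(), {collName: coll.getName()});
    assert.gte(entries.length, 1, tojson(entries));
    verifyMetrics(entries);
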
diff --git a/jstests/libs/retryable_writes_util.js b/jstests/libs/retryable_writes_util.js
index eb3ae969e3d61..18a88414dff4d 100644
--- a/jstests/libs/retryable_writes_util.js
+++ b/jstests/libs/retryable_writes_util.js
@@ -33,7 +33,8 @@ var RetryableWritesUtil = (function() {
"findAndModify",
"insert",
"update",
- "testInternalTransactions"
+ "testInternalTransactions",
+ "bulkWrite"
]);
/**
diff --git a/jstests/libs/rollover_ca.pem b/jstests/libs/rollover_ca.pem
index 7cb94c33ec2a0..b24b04ef9d4ca 100644
--- a/jstests/libs/rollover_ca.pem
+++ b/jstests/libs/rollover_ca.pem
@@ -3,53 +3,53 @@
#
# Separate CA used during rollover tests.
-----BEGIN CERTIFICATE-----
-MIIDxzCCAq+gAwIBAgIEMEXJYDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
+MIIDxzCCAq+gAwIBAgIEa8Os7zANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK
DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l
-bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0
-N1owfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
+bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0
+NlowfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
ZXcgWW9yazEWMBQGA1UECgwNTW9uZ29EQiwgSW5jLjEPMA0GA1UECwwGS2VybmVs
MSAwHgYDVQQDDBdLZXJuZWwgUm9sbG92ZXIgVGVzdCBDQTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBALvUQ/c9NMdWMIPv0B5lP0oKUiwAL2WvTZyzmRX9
-imZm2fYpjbYJD6jrpvYk3BP1wV7iPaVqMyy5VmsBjcdJoZQ8kbUE+LnqFSpPwZvs
-qlBfVPSPKwf4ZQYESmfrtpn1xOhOW3q8OsJHpdmFQFVOzPm87kSslVuwkyUhwLtd
-ToSwe339K4OUJV1ZVrtTQ8bMT/gEV2Gh+fScUpK47W01+he8p3lr0WDwmioyCAYS
-v1FHd6YSHB17/M1n5xsDWFillDTxMYK2Y/He7Tvfmy/d5+wOoUNWzQDIkdKlndQ2
-oy5YF0XENmPdGBY6mWaPd03/zKkhVfU0tY650OvLEhIdqmMCAwEAAaNNMEswDwYD
-VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFKMXkxW7L94J
-wfc3SWBZrL6W5EzOMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAA9eK1CR
-Xijo6jowUggkJgwietTVAJfoKBJS+OKpBEbd6lQMg1BpbK3AZe/mY0L5c/MdwDut
-7UL9TWtbPiMesPZ5PEO54fvddRIm+gUYRXyiuY2p7vVOt0a4136pMCn24YoFgzQS
-vEP8sDHnL/kBVNB9Khwt1Jt8PaTlkuB2B9TqJNcJUwtaAFN8UekaC9MEk0KbRdhy
-d37h1rvR+FO/BtaWvF8UxGCKK6TkvPHq3hk1gJ6wL7ME+U1op7EzKnKD4yro4oq1
-54KqKP3wjcfuZSLPlgIfJjkKrQmG3gZ6f6aNaMN/IoVdUubjRKEfDEkhEEUZnwt3
-Xb6r5w91Rbljk24=
+AQEBBQADggEPADCCAQoCggEBAMUCe3YeyeHqlUUizlidQKOPGK4Oo3apEkhRLEMV
+pivRALuNNpA51gpGfLmUlpXJuRxHwBnOBYdDMmGT2oS8SbRqW9G1LKebtB+9eVvk
+1nN06bvOoWemnID+AgTfqrgMENMCvSueX2f6e10wHvZD8PJagl1DDwvTPqB6ZeMC
+Uxn3h9JFPWa2ZX28m68VUi4t4ZAdoErD8VMquct2TAoTb3sLQfqXOcbZma2ljQA9
+ajpEbJUi36V8Un8XyGHv9NyuQRuEFS2a8IJ4zhggE4vZtxGwJN5fyhtcteaZJiAM
+qgG9I1/DPcwYm27I8WpGJWUNOYOHc+hydr26+73XhYD3DG8CAwEAAaNNMEswDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFL1PcLhtM/ok
+9HRGBQCBtytyKDALMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAKvJRLjf
+JPCqiu75rcyihAdpHvx6QEpEcGvHhpVgo+ZMkz0OhtRcTCpQ3gg/VmzcK6kAAZdX
+7qfzY4i1qdegqidcnp6ozVg+POumEnwGRc8WANeRV2SO+A9t0DNjYS1+2GT/dYcQ
+1TDqTblOPXYw0n9dCZkybtzCDfFQXNVB0gBwjSzQiSiubTAIkkv9OwFHGYI3D2WT
+QkucHZkoq2BguvdNmc917G7WKE6Hj0zxGg7k3VAnE0SBdJcG9bFLHlDE5D4YxncD
+48clvTK33sUNgtL1EbVQphtqrsj+13IO3OdyHC6IvV8yVR/nDJHc0/Mky5oHPKPG
+loO6XClH5ipv6YE=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC71EP3PTTHVjCD
-79AeZT9KClIsAC9lr02cs5kV/YpmZtn2KY22CQ+o66b2JNwT9cFe4j2lajMsuVZr
-AY3HSaGUPJG1BPi56hUqT8Gb7KpQX1T0jysH+GUGBEpn67aZ9cToTlt6vDrCR6XZ
-hUBVTsz5vO5ErJVbsJMlIcC7XU6EsHt9/SuDlCVdWVa7U0PGzE/4BFdhofn0nFKS
-uO1tNfoXvKd5a9Fg8JoqMggGEr9RR3emEhwde/zNZ+cbA1hYpZQ08TGCtmPx3u07
-35sv3efsDqFDVs0AyJHSpZ3UNqMuWBdFxDZj3RgWOplmj3dN/8ypIVX1NLWOudDr
-yxISHapjAgMBAAECggEAIPhxtcSYryUB/ybpcWx0X5rQMckWiNLs3MTp6mJHGRKU
-0BEbPwj4Jr624B44Q4iwGOe9ynuJ4B/oe4AaUxEpu8umCBCHWJsue+7kHWq9ur4B
-O6yl8RA5K4U1Smil9QCfP+gBRtojtla9ViF92Xurn2r6qDNjRGUKb0oterY2QjQr
-fMI6i9oGRPyDYf4v08NjKKOtpjLM7PhY+52GPgEzPLULojqwU2Hwv2hmBf8Yni/l
-mMTrtiLuCtKx0CLDG6O0ZQ+1u7Gx4iuNANDYKHE4Bf0WiRcRHi9KRsN/ZVa8wZvl
-w4NUPo1UdUCKWjM+jTlr3Iv0AM8+BarydYrB3V6CAQKBgQDc/tgcDciVrbViUD0f
-w+jX32g8RcA8ffgfReH+Hw69OaFX6ePmOKggWkI95Wx+heBLiQqGDDadwCIHygD1
-sDKyeMKwRAU3BsYUI414m2zWnONXFQQdHbPzSQvQ3amtPbDwRDN8yzdBsQUFcVPT
-ikdGMvmADEiTQ5Ck1q4kjr5zcQKBgQDZlI8AMVTzC8ulaI8oPZC2IyOe+9PI3LXe
-0/5qPfcks+iQ6XiNM/t6I2eoVqe1SIsMlpCQ1jJoW/GQNdkC6oXAbWc7j7TKNNtF
-DxyHIqctRt2/ijy9p/eXSCMZUTT4PyW9/AWxXYIqXZ76QufF71Zoxs4ZRn3I0NYI
-HEvC9l4pEwKBgGC4nMbydWalByzHJ2leqerJGWq/sFoJW+37/OPmneHNdkLu19Kr
-21GFj1ZdsfVSDI+io9t1PvYd4Ab+rxrYiee4mKTisFGcAldQFBvEEod/VLSJOyqv
-FFIXFzfLu/ZZeLY/czVcD4wNuL/gEKsV6wnbR312YtEpEgZC+yZ+3vXRAoGAB/ii
-WI52H0ViW5f+Dqpav+F/r5ZoRuaXHyfDHV9Ry51vusdi7EFoSCw94vPxxvl2Zqqp
-dzTxudMMgY0He0zeQ5N+gbcdF39iPSB1mhnR6B29iAPnf8dEkd1Js+a+uw2NM22l
-Q786QpUVevOyjBTdpI8MA+8KMq47+SYYPHdMMdMCgYEAjf8TnMe6kB4MC/2urZqy
-pH3kh60EQ3m5d9Ief3wmUBlov9fvegqOrxBQunVeLERO6mXbFV7lVOm8oDvIC+/d
-g18pWN3QoLhfqFTNETwY67EreoMYvXKLu9U7HKs/qYP3RZQb3RB3aSFHlDMJgwIG
-mvnUi+VWUQ3c2GApHClq7gU=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFAnt2Hsnh6pVF
+Is5YnUCjjxiuDqN2qRJIUSxDFaYr0QC7jTaQOdYKRny5lJaVybkcR8AZzgWHQzJh
+k9qEvEm0alvRtSynm7QfvXlb5NZzdOm7zqFnppyA/gIE36q4DBDTAr0rnl9n+ntd
+MB72Q/DyWoJdQw8L0z6gemXjAlMZ94fSRT1mtmV9vJuvFVIuLeGQHaBKw/FTKrnL
+dkwKE297C0H6lznG2ZmtpY0APWo6RGyVIt+lfFJ/F8hh7/TcrkEbhBUtmvCCeM4Y
+IBOL2bcRsCTeX8obXLXmmSYgDKoBvSNfwz3MGJtuyPFqRiVlDTmDh3Pocna9uvu9
+14WA9wxvAgMBAAECggEAU0+DaiYG9VRAH2ZioDKPnRrsMt7Z3VoN8yrwbsX+6VSO
+3MEQq9jpXJsGL2xYaatOblkhMUhgKh0OdxkRNURyXqsDfSECazZ661kystuInHZ6
+SQNOWgio8ht4OxtilEX172WfHYzxh4TzGis5TKbag4Im0s2C1VtVhoN8Bo17GuVG
+R6vIolyn4KKIdZIvChCH0bdlOQkEq/RZ+j5Zz9+Ml5lMJ7roYOLq4Vxmc6wx7ZeS
+4Eofloy6PZHpyvhShjYQGvV4gs0utEh7jWcD1GUkjsSkcQSt2fIwS4kD+vjrMraW
+qeEpYGixYyB662+STihXDxHeZoVZ5HdF6Mi6a2V7cQKBgQDhnRIrZFQD4gnfBhRV
+drk9mLgHPOkLBs5SnFbG1+JAPjZUJpbnRtClKVk8+0AM0Ud/sYx19KYCaX0zvxIB
+Eh+D2qQYWr3ZG+ag5j2GMT9bknvXqafxBLZNg6jwxnVaaOrGrsxboYNinfc51sIT
+sB5ZqDFpUsn4iVUmpcm5GBnPCwKBgQDfiy3Iqh6+hcijM5MTxTkZOKMlXQQGv7Ss
+qS45tSBQJf+WtjXvHBOPxfHAsbzuB6HlxxKBs4gQ/dNp7wR55GEffa2c3XPKsyes
+TMdKgB2SiHLrLw38dhHtOFe5m8IT32CjqmhXe5xtyJSyFxFTsywM69y0zeWHVS3Z
++H5jkO+mrQKBgB4lc8kShdti97iyQkNNMuFVJ7nn3KfZh/Yn57x4GkZFSdMiuOU8
+ZCK9jKwGUn+j8y7P1ZnpT8lguRcR/+DewBFJRMXwUg/Rl5aGTVBCAlpFD4E4rTUa
+URW4cvmBOysSe9SChNH1me2yd9dlp9cjoFqQi9Gr+0rXZuZcHsE5xDETAoGBAMNt
+W3NINmR9dPAoUHZCPy6rcwVhEfoMcplXtg/BJySqc42Acho9w2Q3uqepOvAlQAYv
+SSCWoWepX8AGszUU0UvEhZjTiT017oMVBE0/P5sxKrYsht+lIPrv/NFJOBxDdqre
+eSWx7QmVB1nCDOXNh78sG+D896Kedt1N/sBwnvJpAoGAa782dZSkN6SvKnzCHtQR
+ItzQA3eFmVKJTvzMj9AuEdtuuiX4SQyWwRhMOtlhmjv98QVUvLYlT1/tqYdYcIAM
+zLigrlyCqfn0rgx80A9AWb8eAPiAUFuaaYZPifRHRUZOEohHU8ARK9ZmG3lDNK0W
+aossgb2d+UwdcJV8Ghl7M5s=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/rollover_ca.pem.digest.sha1 b/jstests/libs/rollover_ca.pem.digest.sha1
index c028ae06ec922..ddfe377bf1828 100644
--- a/jstests/libs/rollover_ca.pem.digest.sha1
+++ b/jstests/libs/rollover_ca.pem.digest.sha1
@@ -1 +1 @@
-BC0C54E29440F5B46456402B1D4E23F3C415897E
\ No newline at end of file
+A7BADDAD616FD9FF94CFEF687EE98597E90594D4
\ No newline at end of file
diff --git a/jstests/libs/rollover_ca.pem.digest.sha256 b/jstests/libs/rollover_ca.pem.digest.sha256
index 14c053cf5ae68..06299594bd294 100644
--- a/jstests/libs/rollover_ca.pem.digest.sha256
+++ b/jstests/libs/rollover_ca.pem.digest.sha256
@@ -1 +1 @@
-2ED258565E79ED591A290D07D46A4AFA0C4071FE0744A7931D9B19177ACD27F6
\ No newline at end of file
+5949BB54DB96B97538682C309D91EF372993B2D74203539DDDFF63D64FAF5668
\ No newline at end of file
diff --git a/jstests/libs/rollover_ca_merged.pem b/jstests/libs/rollover_ca_merged.pem
index d89b03d088c9a..95d80109cd4a3 100644
--- a/jstests/libs/rollover_ca_merged.pem
+++ b/jstests/libs/rollover_ca_merged.pem
@@ -5,47 +5,47 @@
# Certificate from rollover_ca.pem
-----BEGIN CERTIFICATE-----
-MIIDxzCCAq+gAwIBAgIEMEXJYDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
+MIIDxzCCAq+gAwIBAgIEa8Os7zANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK
DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l
-bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0
-N1owfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
+bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0
+NlowfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
ZXcgWW9yazEWMBQGA1UECgwNTW9uZ29EQiwgSW5jLjEPMA0GA1UECwwGS2VybmVs
MSAwHgYDVQQDDBdLZXJuZWwgUm9sbG92ZXIgVGVzdCBDQTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBALvUQ/c9NMdWMIPv0B5lP0oKUiwAL2WvTZyzmRX9
-imZm2fYpjbYJD6jrpvYk3BP1wV7iPaVqMyy5VmsBjcdJoZQ8kbUE+LnqFSpPwZvs
-qlBfVPSPKwf4ZQYESmfrtpn1xOhOW3q8OsJHpdmFQFVOzPm87kSslVuwkyUhwLtd
-ToSwe339K4OUJV1ZVrtTQ8bMT/gEV2Gh+fScUpK47W01+he8p3lr0WDwmioyCAYS
-v1FHd6YSHB17/M1n5xsDWFillDTxMYK2Y/He7Tvfmy/d5+wOoUNWzQDIkdKlndQ2
-oy5YF0XENmPdGBY6mWaPd03/zKkhVfU0tY650OvLEhIdqmMCAwEAAaNNMEswDwYD
-VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFKMXkxW7L94J
-wfc3SWBZrL6W5EzOMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAA9eK1CR
-Xijo6jowUggkJgwietTVAJfoKBJS+OKpBEbd6lQMg1BpbK3AZe/mY0L5c/MdwDut
-7UL9TWtbPiMesPZ5PEO54fvddRIm+gUYRXyiuY2p7vVOt0a4136pMCn24YoFgzQS
-vEP8sDHnL/kBVNB9Khwt1Jt8PaTlkuB2B9TqJNcJUwtaAFN8UekaC9MEk0KbRdhy
-d37h1rvR+FO/BtaWvF8UxGCKK6TkvPHq3hk1gJ6wL7ME+U1op7EzKnKD4yro4oq1
-54KqKP3wjcfuZSLPlgIfJjkKrQmG3gZ6f6aNaMN/IoVdUubjRKEfDEkhEEUZnwt3
-Xb6r5w91Rbljk24=
+AQEBBQADggEPADCCAQoCggEBAMUCe3YeyeHqlUUizlidQKOPGK4Oo3apEkhRLEMV
+pivRALuNNpA51gpGfLmUlpXJuRxHwBnOBYdDMmGT2oS8SbRqW9G1LKebtB+9eVvk
+1nN06bvOoWemnID+AgTfqrgMENMCvSueX2f6e10wHvZD8PJagl1DDwvTPqB6ZeMC
+Uxn3h9JFPWa2ZX28m68VUi4t4ZAdoErD8VMquct2TAoTb3sLQfqXOcbZma2ljQA9
+ajpEbJUi36V8Un8XyGHv9NyuQRuEFS2a8IJ4zhggE4vZtxGwJN5fyhtcteaZJiAM
+qgG9I1/DPcwYm27I8WpGJWUNOYOHc+hydr26+73XhYD3DG8CAwEAAaNNMEswDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFL1PcLhtM/ok
+9HRGBQCBtytyKDALMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAKvJRLjf
+JPCqiu75rcyihAdpHvx6QEpEcGvHhpVgo+ZMkz0OhtRcTCpQ3gg/VmzcK6kAAZdX
+7qfzY4i1qdegqidcnp6ozVg+POumEnwGRc8WANeRV2SO+A9t0DNjYS1+2GT/dYcQ
+1TDqTblOPXYw0n9dCZkybtzCDfFQXNVB0gBwjSzQiSiubTAIkkv9OwFHGYI3D2WT
+QkucHZkoq2BguvdNmc917G7WKE6Hj0zxGg7k3VAnE0SBdJcG9bFLHlDE5D4YxncD
+48clvTK33sUNgtL1EbVQphtqrsj+13IO3OdyHC6IvV8yVR/nDJHc0/Mky5oHPKPG
+loO6XClH5ipv6YE=
-----END CERTIFICATE-----
# Certificate from ca.pem
-----BEGIN CERTIFICATE-----
-MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO
-S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf
-vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T
-pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu
-Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq
-BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4
-vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr
-qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
-hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71
-2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc
-iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW
-cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO
-Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX
-JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU=
+S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb
+k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh
+iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n
+NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX
+qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX
+5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD
+TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9
+Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi
+0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv
+xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL
+7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+
+gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA=
-----END CERTIFICATE-----
diff --git a/jstests/libs/rollover_ca_merged.pem.digest.sha1 b/jstests/libs/rollover_ca_merged.pem.digest.sha1
index c028ae06ec922..ddfe377bf1828 100644
--- a/jstests/libs/rollover_ca_merged.pem.digest.sha1
+++ b/jstests/libs/rollover_ca_merged.pem.digest.sha1
@@ -1 +1 @@
-BC0C54E29440F5B46456402B1D4E23F3C415897E
\ No newline at end of file
+A7BADDAD616FD9FF94CFEF687EE98597E90594D4
\ No newline at end of file
diff --git a/jstests/libs/rollover_ca_merged.pem.digest.sha256 b/jstests/libs/rollover_ca_merged.pem.digest.sha256
index 14c053cf5ae68..06299594bd294 100644
--- a/jstests/libs/rollover_ca_merged.pem.digest.sha256
+++ b/jstests/libs/rollover_ca_merged.pem.digest.sha256
@@ -1 +1 @@
-2ED258565E79ED591A290D07D46A4AFA0C4071FE0744A7931D9B19177ACD27F6
\ No newline at end of file
+5949BB54DB96B97538682C309D91EF372993B2D74203539DDDFF63D64FAF5668
\ No newline at end of file
diff --git a/jstests/libs/rollover_server.pem b/jstests/libs/rollover_server.pem
index 306ae1b885239..f90adf960f3bd 100644
--- a/jstests/libs/rollover_server.pem
+++ b/jstests/libs/rollover_server.pem
@@ -3,52 +3,52 @@
#
# Server rollover certificate.
-----BEGIN CERTIFICATE-----
-MIIDsTCCApmgAwIBAgIEBBkPnzANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
+MIIDsTCCApmgAwIBAgIEDQu6BDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK
DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l
-bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0
-N1oweDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
+bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0
+NloweDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO
ZXcgWW9yazEhMB8GA1UECgwYTW9uZ29EQiwgSW5jLiAoUm9sbG92ZXIpMQ8wDQYD
VQQLDAZLZXJuZWwxDzANBgNVBAMMBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAMXA2FrbYDKpEo2EKRNCsx1C4hvDloS5S+GEjOCSk2tqPeZA
-cAMrSMhLYH48Qoda4ojVJ+kbLsRABQyLQfIwc4+zcDO2P7cCVpHK2fsnjaPAMAxQ
-t8M7zHAeQxCRFMjNeU9+wymQ46ztCqvetEktmIjPZpfqIJo4DPeVlqwxj0kNm6w4
-hEtS+rFUDVgunnTMugbEBXcidkBpMGRiovC1U97YjKJlPtZCODZ3TrDSWpppMKXS
-4rIugM6VouUitqlJ85kKUaDYWzuoUPVYLwdf0lWUrlgrYruvYWVs42Yh9yB6GN8B
-3Re2xC1XEPLVR81VP21hIdcBdE+GtK02jxKgfvECAwEAAaM9MDswHQYDVR0lBBYw
+ggEPADCCAQoCggEBAO+78PkGqGcpB2yLFwqJCcD/+nqNa/G/ZywnCEAVaJSKfQsD
+inq4nHbh4FHReB7RxikuhXymWi8HpzATTcBw2t92ZnEjxzJ/CXqZUshcGkGicJXX
+vLoEuWRDL7pIgd2uju8Z1OKo8Ueb6rkIh0dBKtP8WkdIzXl5Cl8Tdyqu59vKlRIf
+pfi7lzQCZmFnhf+ClsRAgqlf7efIWzfFEKnjzZsWq5clo9QVt4s83tXKlL02XT5v
+TPvc6QooYI7KbepamHhniO1W5vZ4RL72s+28TAAEzu6jY1ndqwhP3Fxcsg0riEqF
+K2GkAPjReD7YHjNKYMdrcjtdA781i9RPuizcEkECAwEAAaM9MDswHQYDVR0lBBYw
FAYIKwYBBQUHAwEGCCsGAQUFBwMCMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA
-ATANBgkqhkiG9w0BAQsFAAOCAQEAsm3OxjXa3lO7rE1sjE6OoCAk9zad5gb5FUyZ
-yVEvg5GoOpna3ceOqWbHy/jYEI+ZRfzIEUENLXAfHe6IyPXCHuzhhxrxEikB5S5R
-yT+j+EZvBmFOexZ8dS26U2jMdB6R/NEcTFCWheKV2gVIGp/EFWppA93AcQbJuqQ3
-+HppdEjNpouRTbkVw4/SIu8B4NWqW7+3IWbq6OetzZu8M0a1m+iYKSN/zAbkVyHU
-Wsg0xbXeF75+91/cKieyDmfntaIS6ZjdYDymvQTZ/Lf4sOD3z/ubrXACNrqe5iYi
-SaZ2wyryWQtd8R7l53HBNFVn72d1XxIv4ikMJaGnxo6hmHC/sQ==
+ATANBgkqhkiG9w0BAQsFAAOCAQEAAP374HAhwhdwFwC2aes21gbx0zSD9UBLStYh
+rzh6nwJ0z5lIQv/Y1OVZypjlU8ksuTFNJ9VjgN06ebTPOsX/mD0OszvRdSX7UvtA
+YwR2s/0x/htd09/ET43XwxMVJ44HiBVrgzcm8focxNK6TYdmR0AzPczmLeIum5q/
+GJQZrSYpBQYa+6Hztjq4lhXhMOJKXq4FBPE43qOPDVhJf2DK5z7qscO8js6fMAre
+gwX4QquHDjUeaMHedg/D9NKlVu3zC6TJoPZlZMu889w9z2iMa9DZKpaW8NmSF0yD
+dFmkV0k9q7W79XmfctJCUtW+z3SmTO2i6YpFyeuBMPIg0pbhaw==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFwNha22AyqRKN
-hCkTQrMdQuIbw5aEuUvhhIzgkpNraj3mQHADK0jIS2B+PEKHWuKI1SfpGy7EQAUM
-i0HyMHOPs3Aztj+3AlaRytn7J42jwDAMULfDO8xwHkMQkRTIzXlPfsMpkOOs7Qqr
-3rRJLZiIz2aX6iCaOAz3lZasMY9JDZusOIRLUvqxVA1YLp50zLoGxAV3InZAaTBk
-YqLwtVPe2IyiZT7WQjg2d06w0lqaaTCl0uKyLoDOlaLlIrapSfOZClGg2Fs7qFD1
-WC8HX9JVlK5YK2K7r2FlbONmIfcgehjfAd0XtsQtVxDy1UfNVT9tYSHXAXRPhrSt
-No8SoH7xAgMBAAECggEBAIzrhcIJPp4bWbs7CMJO77DUbqh2Upa2LNrCSFnoWeFb
-zkx8ctXvTGhfPp32dzpZi8ESlORKmKMFNBPiUNYzrMYkirpvDyxtIB+4vGl7oDWm
-yRadlrYrsN3c0tHFcVg/BEmf/ZdrRrN+H/KmOzdCpMYpdeSf1SfQ9XyhPsPqdT8K
-kKM+Yn94qqjdlZzPc5ylBllg09oT+NDfBEUTa2gsSLb3VP17bh0/twUPMBA864ds
-5aT0MkaZYxNu/LYsc3PNhfZnG1qlyJwT6oIUCju/X0qm6iAXy9d7WZ7HN4tZ2G7H
-pjk0sVJzlquzhfxRYD8g9Hhu4fzMEnC0SvVIThkNGSECgYEA6FaXTBeKm2tzCqCr
-2SaxBRcMgybWLTgUg/Fnm+aQVC6f7emMHb1K6nlZxabTpz0tcIJFoquThTKYvvUB
-e712wtd8ujmCDfXK/7SHWn8EUW0vbwA4yLRcKX+J64f5VhSnvVTd/yvgEpj0hzCZ
-8jdfg3pyK2LZ74WxiT9vxl1Sf4UCgYEA2eSSh8YApSpcM155MpXFTQlgBDuUIfot
-o33j/ccsJyB47hGXphOYVCBA+RMLiix//Xp34x6xeFJFfMeSIWiaGIRZ3mQpm/eN
-UPUL63Usk16cGkhbyEOa0s5o8mTjT3dTYIuJ0jIFBIfR8nfqjg3YmKxHlhm9uowe
-ArExXCekv30CgYBZiFzfdsb0I5D+jHIMyWs4AezRcZrhbBYDznhVzKDbv+fjf7d+
-El8XQlJE54fyj5G+JIV/LU047AmOtM2wiI+GgBHRla23gXuL1F7AkefxFPlNdjFr
-ro1BdKOKfyincmg9fsHZvmen4weAPUtl2s7U1M/ARmSjd8q1kBubvoS3HQKBgFKR
-1qFS/D2avtPMkjsEHH9j9RnFhg4WIyNYeoRZ7LZrDMiSrBgIRupiSpzYhb/3uwzQ
-UwwjPndtHd36NrsoS3TM+s1WwZnUBp5OLhUtExZJfPhMwVMzo0ENbSGl76nWeycT
-2cdooxQjcADlRmJMhu7cIkiUOpytqlW01hKpBzp1AoGAGwf8hPmGVliXI3QxkE+0
-x4vMRU4TxdYxwI/NUq6lg2VKR8MtjwPRIZzSgE+WXuwEujyW5e7Xik+MWI+d25Wn
-640nsi8SdrffjaRTMvJ/cngPt5O6h/2p954+5w5w1JLwxRM9xuMYx1aUiNnvlWqQ
-aZP3iz7pFJ2fTDM8T3y9++Y=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDvu/D5BqhnKQds
+ixcKiQnA//p6jWvxv2csJwhAFWiUin0LA4p6uJx24eBR0Xge0cYpLoV8plovB6cw
+E03AcNrfdmZxI8cyfwl6mVLIXBpBonCV17y6BLlkQy+6SIHdro7vGdTiqPFHm+q5
+CIdHQSrT/FpHSM15eQpfE3cqrufbypUSH6X4u5c0AmZhZ4X/gpbEQIKpX+3nyFs3
+xRCp482bFquXJaPUFbeLPN7VypS9Nl0+b0z73OkKKGCOym3qWph4Z4jtVub2eES+
+9rPtvEwABM7uo2NZ3asIT9xcXLINK4hKhSthpAD40Xg+2B4zSmDHa3I7XQO/NYvU
+T7os3BJBAgMBAAECggEAH7NAaA2DUzDPtICn6VK5oXW3z2DH902R/PqkUqUCETJG
+2p/7PcMPnYC8wtTqB1lB1Mw8BoGQx0tBxk+nJl1F8+qaHTfX7UtzG20Oz0OujSNH
+s+i1Ifvh7+oac42G7qNYbzGBPv+L+CYb4Z/JE5kAasuhvG9aDEf/IRdX2KMMMbnU
+eC3At9WcrxqNvFreYaVJNgU/AUbY8n9lyYSWJw/GODkf1rAhPYU3DrXqSQtnJu8D
+HEo9BUFxvuz6FMeADYecNSJolKU6d1KM3cRtDsftgK2G2Hn+v99xubIoFsV/7M6R
+9H6pQaiKsSo/yatg4s0QQmQNZ5EHBySqTYvyCRG+aQKBgQD9J9kpVcsWYzNX0OFT
+15Zr8GUK0V4UNMdVgpjaPLQx9x9+wBpCmVFfhGEmP7aM2ZtbH/1a3CsvqMzouweZ
+NHfmVCybUmBbdcj36PXzVFS/iExDU3EAZizzAxQ1QscDHTnp1DaJXzp9o3VZS9vv
+ptTddaddPxaVxWCxncik5YKwrwKBgQDybX0cWIvQJBO+bFk2kmWsLWlK3gmznGy4
+KnA5oJbJEv+9ehw4WwMSQPc5PMGbOnNN4cmkn7XFbAeaCuV8tS++8LFlGJNutrbb
+Y+DiEBHuaCbscM+By90Z018qw96g3ejLGKUTjffyte4kYxZWqODzab5qi/LPz01G
+24jtT0XIDwKBgEvMrapBxQBcDZiCs6UuDR0eqrJ1hAzazMCezPOzb2TykJycGoDM
+dV/7PDd+pkNAONMtHeghulCX41rf/WNzIV923rBXFwDroJQSTepg2stKnUMfbdtn
+vJe62UclSn91Ncz8vKOfPt01n0Jwg1cbhesVelpiNHn90nj8PIKbMemDAoGAfEhB
+QYzrcHNuY8ssGVwIH7LQLf+Sva/N1MR88C4x3zeMQFkfqoyOEIeZtpA9ORVIE39T
+XsA58dImO4Smjb5dkefXKdrlinbFW8fifEJ8zto6SjCzUPlwilLgkQ4FTjc1pxkl
+V9cKbDV0ttbNlvPRDzkA06KXgo3mMhNOKUrgnWUCgYEAwI9FloXAj+S3efkeZ9jc
+viQdRtWRQGnmidVu8Rfp+8mBEQ/qu2s1jI66L5ax3Xp32QL4yDe979OOwp7pV9Y4
+M3cJ3lM/82bkfIQBP2MfXr9LpBQ9KaPInVWHU8fTdnAybQttJ5hZnUHiQ9Bw89Sr
+f7g5DtSK1lTNZXS/YD5Mbfg=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/rollover_server.pem.digest.sha1 b/jstests/libs/rollover_server.pem.digest.sha1
index b3c5b556abac7..3baf539de1996 100644
--- a/jstests/libs/rollover_server.pem.digest.sha1
+++ b/jstests/libs/rollover_server.pem.digest.sha1
@@ -1 +1 @@
-5C5314F8D08C97FDA9DA5B112F5FA30B37319197
\ No newline at end of file
+C7F5ED18F1B7BB7461F523786EA3DC86CE760CFC
\ No newline at end of file
diff --git a/jstests/libs/rollover_server.pem.digest.sha256 b/jstests/libs/rollover_server.pem.digest.sha256
index 12ff9605c6cd2..b658c87a79aee 100644
--- a/jstests/libs/rollover_server.pem.digest.sha256
+++ b/jstests/libs/rollover_server.pem.digest.sha256
@@ -1 +1 @@
-C6F4922E7BCEFAF6ECE24C01D812B7CC77E8C4599717DBE916A259FBBFD46779
\ No newline at end of file
+77F7D478FF7EEEB6EE4F15A0410C24B83ED728D4C3F7654C9FBD865B1EAC6484
\ No newline at end of file
diff --git a/jstests/libs/rs0.pem b/jstests/libs/rs0.pem
index 14f0e2a9d1cd8..1a03d984df691 100644
--- a/jstests/libs/rs0.pem
+++ b/jstests/libs/rs0.pem
@@ -3,55 +3,55 @@
#
# General purpose server certificate file.
-----BEGIN CERTIFICATE-----
-MIIERDCCAyygAwIBAgIELAAMRzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIERDCCAyygAwIBAgIEOEYgezANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBYMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMdo6tEwLb5g57mc+aj8i8sZEhePJ1LScFicbZ1v
-yxsmcQ1T3m0Vcy/MUKCja8vQ5aDYavNZmJ6rsHmmMjF0IGG4UgDmJE40kMXfLRCJ
-DJ9kZyToq+CVQKHHNFbCCF4rYj7crz0t9qn8G3+1L901O2JkXhUdfIQ1XJiB2kfD
-mbUBjl2kxhobX0IH4QKEFoFvVcT0xGBA4OFMRk2fS9M+/6ya0z5wcjeqSw9juCWr
-H6HtcxRCHVrAEBJk7lbOKHGpQDPUdm0zL5/ayj52Y+ky7H1+pergjgx/ilXOmQYp
-iOHI6fUuRUR194J/t8TG/PE6ZQqN2YxeLY4H5ffFha3v6ncCAwEAAaOB+TCB9jAJ
+AQEBBQADggEPADCCAQoCggEBAKzHxnz3XQ18kldMMXkmB1asTjrvUwfjRTjd2SPF
+BF4kYv04iNW2RXZv12Z8HLFvkMBPICeli0k7MGPParqHq9xY8iEMWE2Kg2/UD6Rk
+GRfm9DKLg0XsTY+tDGx2lV96bxHoAdJyAW4+Gw+aLkoaU/g9KWS1T4uXYcXav42r
+rYP4X5IQoDidUJrYEanjOVEq9Yb2Yuhx2ms+/UgMJh2NeufQjzeSaLKuWMvg+ifW
+CSQqXrVzBLpMxl5l7Xzv0RQSm4W110EagDT+qyyR7dQAPj927+0BxCVWVB/eWrgf
++T9/ktYQwkfElux1ceQvTyPo0drwbB3+s7vDIYkyW81V6J0CAwEAAaOB+TCB9jAJ
BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV
-HQ4EFgQULj+elw+YJr0JKdiRLAtdeH40E4AwgYsGA1UdIwSBgzCBgKF4pHYwdDEL
+HQ4EFgQUL8FPqdWiheUf3BW+3276HVxXwgAwgYsGA1UdIwSBgzCBgKF4pHYwdDEL
MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y
ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV
-BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
-fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAnRwckjgQFPpAehcBqZVJOBQspjfQKDWF
-9iipxovXSQ5yPfqKu9Apywy+GryFhg881UFEcPgdVVxMEZ5SkUQNNhjv7+QAPz9D
-Jc0QqIUdZkgqAvJShlJ8y+7C27p7pUqbsflyx0Zd1NnllNW4xclhZ8pG4oqmhX5H
-fJxUtz2LEk3pttC8c56f/t3CaVSVFDAWDFfPXAj2ZOg0cKpjn6A6rxA7h5gAXFlB
-fjEjBVpyeMzgfS3OvHsSCZCe2mZK6vdRHFnniZMDPog1h7Z5tDWPAZH1UF9pGhVg
-Tbyg/Xbp159SjmleaLMsL39E0Nqj18kzMYHT5oxlq1LPyrzmUcVSgg==
+BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
+fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAXnw4jioplYNcJh1AAza9BqCyAkPd4zC9
+K1yaJZPeh5VJ2ytvTYsC2JXohVWvfqO2Rd7I4x95KB5I1+bbTNqpvvN5/9/BDo1h
+jL83THTcPCifyMm7i8O+I+jPcAGeKoRkprxZwQrhBbqQgKAcayCVe6m1WF0t5nt3
+e8I7pKLYQlW33N3FFDSpo5C0e1XEZt5pXBNlCafowqCHEKp/u6NH5sDFXaiSIWN/
+Y8xRiV9EVTfQcBhC+ssg3GbtvEWGLZY8uzpTh2KFv4smj3e7rel9blh7MRlie0/r
+DkUxt1eYchLmQPBJa9xNLxNXCSccgI5Emx5ZGf93I22fEo0u6mhU+Q==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHaOrRMC2+YOe5
-nPmo/IvLGRIXjydS0nBYnG2db8sbJnENU95tFXMvzFCgo2vL0OWg2GrzWZieq7B5
-pjIxdCBhuFIA5iRONJDF3y0QiQyfZGck6KvglUChxzRWwgheK2I+3K89Lfap/Bt/
-tS/dNTtiZF4VHXyENVyYgdpHw5m1AY5dpMYaG19CB+EChBaBb1XE9MRgQODhTEZN
-n0vTPv+smtM+cHI3qksPY7glqx+h7XMUQh1awBASZO5WzihxqUAz1HZtMy+f2so+
-dmPpMux9fqXq4I4Mf4pVzpkGKYjhyOn1LkVEdfeCf7fExvzxOmUKjdmMXi2OB+X3
-xYWt7+p3AgMBAAECggEAevMdlU13ZuKo/bDj3mWYa+lRVmVVgaNu0bPhgijjuiHB
-os4LXobTNq1rKSCZ3lk2vC20A8dCatLBRMBMQmGvdZEIPrWnvPB8MwSf7IoT1lM5
-pXAwSZC9AQKqKrQIJMfrE4dOJRKp+7UN33f/wwyHSOtJdwtvMrUUGpQ12fmoVAlC
-93LlK0g7ZrnIXsuZoMnJqNKra3nizTAeKzG0AvrKU9tR21SPkQZJvGRKDqIbYfGW
-5zJorVLm+yb8iS7XYS8uD/MKeUIo0uMFgm6YDGIBldyh/kTAEqsC/03RFAt3u5bZ
-4Veo1HMU0rb7XEe3rgjhZX0SS7NfaTLH4yQjRXNPgQKBgQDirpy39v4SrPVEZkVM
-Js78pSN7Mz0D8Z3NhBJDj+3LZIMeYA75LVrfrEdy/aNGrmwIBBOmKeIAZ3wt7K04
-Rl6ic3Ng+NADEoiDZnI0QLvrOfDtqsfQ5UggU1AvXbJgubmaqhkm1Fk3CKaSXqCM
-Wy3cGEr923VfMwyP4F0KEFlGWQKBgQDhM1PmAfv1QxXcd6CBMMqNNbxgp2GJQ7IM
-jtxbM4/3hW8ZOskz34weycGOrc63BMHHwesoyTKssitz1NiCL8+28kuHNfech5Hh
-pVumPM0T3Yo5OGaZOPyV/Onbu9knZvdnQ+AxKiUbQ2P6pw49KDccv5Z8jdn8ppZE
-d4dB7aA9TwKBgGve8h8tP3z9p33UOTfi4+8gWsEfAfMlgJWzOsnB7UQz4Z2L0rRL
-HAqCvkF/Z7qT+D7cSclx9uWAWXy1Oz0jZ5dg31APN1Wu9R2qm8A36fUnTnqbIZlL
-8sXHP8o6iU80MpniRjPPK2FgEXxn4XjJTwqE2PLzA2i5L0osV7oZS5KJAoGBAKlB
-UoVEaIHlccDJiDCQ4ytOf86E/qGT53XYyXUiQ8nc16s2q5H5Ke7Z5EfACeU0BhCE
-hqGY1iVFo7Li/faayJrPcESnIvraSaI2DbPbbqbHkuN+qF3wnqZ796fWf1dF5BwQ
-v6cz7b+X3sS8Wc9NMl7A//GNC5EAA2BiFZ5PYmexAoGASc45IMiE91ohTVKghuhP
-aPDxqMe+pW8v+RjMt8S8nACjVa86ArwfP7ay1o0pbGOWztxrarOrDvFTdB5np68W
-WcYZIusKepo/tacAYDv5/EoMobHAHp+/KlLeP6zCqBLK7LSp+g7w3LH7KRCPcfca
-hVBUJrdPssNoOVsesKTB6RU=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCsx8Z8910NfJJX
+TDF5JgdWrE4671MH40U43dkjxQReJGL9OIjVtkV2b9dmfByxb5DATyAnpYtJOzBj
+z2q6h6vcWPIhDFhNioNv1A+kZBkX5vQyi4NF7E2PrQxsdpVfem8R6AHScgFuPhsP
+mi5KGlP4PSlktU+Ll2HF2r+Nq62D+F+SEKA4nVCa2BGp4zlRKvWG9mLocdprPv1I
+DCYdjXrn0I83kmiyrljL4Pon1gkkKl61cwS6TMZeZe1879EUEpuFtddBGoA0/qss
+ke3UAD4/du/tAcQlVlQf3lq4H/k/f5LWEMJHxJbsdXHkL08j6NHa8Gwd/rO7wyGJ
+MlvNVeidAgMBAAECggEAeMXEFsIJx02WNXNK+bX9VzZmEIgNb+wLDO71wy0KGoww
+rXQBIfGgqGMRG9ARdm3XrrUgctYigHPhJUNtBSmklgeOaE9qwfjaWybOMEjPyYdk
+lpgykIPWy2FY19AkJXM9hDS6YVHTci3zWHqbdKzmmRSXhI/AJIESlOyNDQg954pl
+pLrI58NkJpRdmcmVIrHJeQWTmOq+VrIyhqBpgCTnONZDtkA1+i6acgPQh+O0kJ5O
+A+KTNbQJPO1tmpiVuQJjNPUlfgBOt6vmgPLjkHQ19/E0IN8auAaGv+fMoQ4XqlYX
+nuYvJoEOfhXJYHRWta9EFAJZyvs752LHlbFz5VKweQKBgQDVsGwPXJnBImtEPBJ4
+DUyxQX8fB4eUHaQU8d1FdKzaXmoVq6oxZSXQMUj1f+iRfTJzT1MtNe/q/+SS1d7I
+eVuPK3yPGPflQFkuDuSTgWJB7UgX2+i8kKTlDadfJJNPTMMIZzFJrbGhhscVl7rx
+IRvunR92gdlaUqeWxbUxzzvPawKBgQDO/b/lCdvRIhJcQ9oVDfVVkXX80KPdv8sd
+2HmDojx9uRrZ0aCM5vk/AKVZTEoS+5fApbojzpRdpj6A/9dZcmw7G5lrotU/f2aE
+z9XmDmV8TawZifRiQu3n90xnv/jwUEwSJR6hOZXXmBBK/Bit2GKiEEfHaNym1q3c
+U+sWkxJSFwKBgCwFbS14+R/FdG2ZJoDe1IbLpGGDDpYfyRabgOb3E0jlHFucgrIs
+US7jiFEy6XlXXlZM6CivLN6vmqn5Ly0Dey4yWjWsgh0TNYv8e3A7vj8wn6Jypi56
+ac3aEznRchtrB96qS4gPJUHOXyL+n/9ev79XVQz30Qv/bRDtZ9d8BqlDAoGAbhHd
+k0wUyjcWEF48f+m2RlRdq5y/JtIwjqRoqakCBdEDCEVC3OqOLASJ6Nx6n3GOlvEJ
+9LSLjOk0X6CswXHpP91DTkt+no9+0q06j2Wkbd9X3xTPEdmJbUrCJIGfPRtV+Ggo
+y481sTm5oEZCUV+5w3ho0w9eFpIeTgWKA60dlu0CgYEAxkpYMpOtQ9GN6O73qcB2
+0oTiomEEq7e2nbFtdkVj/jvRJ1oDsbAn60noVCKzNLX0f5/erSIr9Vr/SGs1W9Hg
+vUjCdkOD6RJSN3Y9irVuhdbOkKFQa4HYKrJBQqEelgCVefeCQ+uyxqbUZiVrA8Fv
+vBuGtW9UCYEN7jt5kO8Qmgk=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/rs0.pem.digest.sha1 b/jstests/libs/rs0.pem.digest.sha1
index aee2a51fa6c5e..d9a15ff150455 100644
--- a/jstests/libs/rs0.pem.digest.sha1
+++ b/jstests/libs/rs0.pem.digest.sha1
@@ -1 +1 @@
-C89AE2171C6A7E7985285ECF05755953288B45E3
\ No newline at end of file
+317BCCD4BE507AC43E46AF6FA1A7B73EF3F382BA
\ No newline at end of file
diff --git a/jstests/libs/rs0.pem.digest.sha256 b/jstests/libs/rs0.pem.digest.sha256
index c7bcbd608099f..2f545c2faffd8 100644
--- a/jstests/libs/rs0.pem.digest.sha256
+++ b/jstests/libs/rs0.pem.digest.sha256
@@ -1 +1 @@
-BBAED3E7D623E89E015831F02FEE9B3D9BB55630B2D1F4B0463ABF96F1C36F34
\ No newline at end of file
+D63654D8DFD75A5E8C237F55AE7B2B0F825778B7C719DD5C7BB850E8C1E8CF64
\ No newline at end of file
diff --git a/jstests/libs/rs1.pem b/jstests/libs/rs1.pem
index 1ce5ad8af7dce..540c5166a8d91 100644
--- a/jstests/libs/rs1.pem
+++ b/jstests/libs/rs1.pem
@@ -3,55 +3,55 @@
#
# General purpose server certificate file.
-----BEGIN CERTIFICATE-----
-MIIERDCCAyygAwIBAgIEWAd1pjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIERDCCAyygAwIBAgIEeJ4FSTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBYMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMTCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAMUg2WiApnL3z6XBZ5l5WJJkeNfOE3FMJDYTnQNw
-OV6zWjsAG1Wuzi9QiH8HdyIDIk61l1qcAzfdecI02pTjm8VgLl3lFlpyXzcpazXO
-bTxXqenCB4vSLIWNIMbD/qIzAxsMXjI+2xHg7TnGS5zNM2eRPt8RiNv+TfA99bKS
-3TnQPCEO0lKafKR9EPmdUSMgEwHLEX64P+wabf8TYopS7eUy9nI1QvkZ9tphbGrS
-AIrKQH8pU5OzObkxeCuMNjai/4JQ6XlEOvUMl0LOTLB4pfBc04PIokCdUmFRhIeM
-fzJ4CKMLrtV083EOADcN1HlRf65XJED3WjjbjTYupSj35f0CAwEAAaOB+TCB9jAJ
+AQEBBQADggEPADCCAQoCggEBAM3Zy8Ex0Rj99cwRtygAmNRlkbH5sKt1v0MFkkWb
+0ni4FmoU+13DheUhhQGn7SnD00oh0sjACEBrMe/2lPhsFJoph5HuDAwLBcgoY0IV
+46fOKN3sZcKpV6pkq8MqGEwD/pADsrfm/A/LyBQgIyuNOHK40ND0E6iQ8310TsES
+Tx0H8MivJ17YRLONWKH7yAPnEMVN7zq0pfiAkrvlzvMIjJcYgh5q+DR5Drf+kz3h
+4K1RG6aUEswXBEF5CywGGxjbbMfPcVE4ztYgke6V4uCdgjRp4/YsdS27h/t6vXYi
+l5PyARJyevqv/g+5OErQi/J9szBq2ZKjA1j5KJfw6l3ex98CAwEAAaOB+TCB9jAJ
BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV
-HQ4EFgQUcWth71HW7KFa9lXxbGBmPDwt0BwwgYsGA1UdIwSBgzCBgKF4pHYwdDEL
+HQ4EFgQUc0AkmxrNl4bhLNcLVlIeBl2aBwswgYsGA1UdIwSBgzCBgKF4pHYwdDEL
MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y
ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV
-BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
-fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAQxxTMUk5Lj0scrdkG5kIJcOmR8EYJjF0
-OZ7/K0Mt5J1naYvB9WIaoDkR9QVhPqTaFMBOOImFHzU5ElVCSmH0QNtvPEGKxIpY
-+M9pPfPGOt4Rk+Jq3ha4fXrJmMXAxJf++zR8n8Lfb/aa7gULk4lw6StlEfT5nLpx
-VPS+F/VyPuSQfJcum38hwFl5Itm7jRTWqtwi4UXgcq+XnyIiJnr9k72wdzDD40Cm
-qTHKbR9zuG7O7N+VqceJgZyYwgHiuxflPY+FPT5uD1hPvsUjNIYBtmFEwUifGPF3
-/Oub8jzFSIzG/W85UAKCSW7Nd+e3n1/RObB75kpUZgJMNzaGt0JXcw==
+BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
+fwAAATANBgkqhkiG9w0BAQsFAAOCAQEARrzy+HQuvJMHO3klypBOKLVvgPzP70tv
+uDOHZqKVJG9KUPhl1Bl+nxnqt8RLp0VXSilT505yfjpAQAfROVERY8M6xXsDPwnC
+grj5Er1ehWaqHAn3nJLZd/k9QpeEq5A5fVJpmwKYK4Knhw8Sutpez+aMC5PWXytO
+v3pfZ7sl7sGl1Nf568PKBdcGOeLm6TZ12s6MoCnUNErao0P72DDoEh1gTHuM2P1+
+Ctk85L38z2H+XWNYIUmoy3m7HEYy4VnKkLVqE5u6FHphF9rhYZV8AECPUnM7mSf2
+BncdCoa8jU8ZkZNkAtUngl7bBvMiVPuG7qbWrTvXns8HFTDIba1Eiw==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFINlogKZy98+l
-wWeZeViSZHjXzhNxTCQ2E50DcDles1o7ABtVrs4vUIh/B3ciAyJOtZdanAM33XnC
-NNqU45vFYC5d5RZacl83KWs1zm08V6npwgeL0iyFjSDGw/6iMwMbDF4yPtsR4O05
-xkuczTNnkT7fEYjb/k3wPfWykt050DwhDtJSmnykfRD5nVEjIBMByxF+uD/sGm3/
-E2KKUu3lMvZyNUL5GfbaYWxq0gCKykB/KVOTszm5MXgrjDY2ov+CUOl5RDr1DJdC
-zkyweKXwXNODyKJAnVJhUYSHjH8yeAijC67VdPNxDgA3DdR5UX+uVyRA91o42402
-LqUo9+X9AgMBAAECggEBAKJKeBLcD60JLg/12Vf2GiBzzTVGOKWHHSzUGulQxDqe
-CAQZA2gYQTGc6LNELyV2VRFd2RzE4rVkhjCIGQiJFp55n9K3nx6ZmKAuXlBvAWmz
-dnS3xEStpO/Sj5B0nbdlrgHfvdE6BFuExWlSQr/2BIMYhh1aL0WG6R2HtTbuSGml
-/QDjPN1x8mDunzt01QZDhw8XyyZhMSPfVNrgeS20OAU8tk1somxJi0TlFfe8sje1
-v4npn2ACrr5efbjP+k12w1edny+gFHZT4zSt1ocSj2SAzwSfadPgHDdp5kn42xau
-nc5/+ZcusBGVTsBKJGDxGrzE9to4ga2zaC1nVqiUrVUCgYEA+7XHlY7eAaabc23r
-TYwpLafaCFSyxPTDERz6LbuPeEZGE1xDNj6uN2wKLMJm3Btb5akbTHuKixM8GH6+
-Jg2b04aKOLZOPmOWTmezxHmDmC32t9zHg9cGglKeLwjj/WUN39h0cepBEni03CCU
-LdeFxR2qx0EmrYH9lz5h27Cm/kcCgYEAyHztaXdDHKry49tgoXgODGlVg1bWkmK8
-NTY5nyxjgQoDrZHVo6Np/VZ0T4GJj48e/ThebNnQN/MzdScerf/7OcA05+rfOwon
-6y702BBF+Okvj38s5CamuKPOhvfibhN+xwf4HCax70TzE4VvZPqx4sz9QKwtv0A4
-hfwNsbHYB5sCgYEAt6zjUihpqky4XNfV/8WKeu1kNfYQaZauDXvWt66JN9wX4KLP
-zPKaUIj/N8A4LN+uBH2NFReFdoz3qmw6NyxxDD69+DpYCEDFertDu9hsBY1s3qg1
-0ugCsPC2y12yeMLYCAM8na+yAlegqoz/dCA0Vi2a5EGPhsc3lLkJ0bUrdpsCgYBA
-lC0NCcNuWoo+Zw2rNTahPNS0p6YaJP+mzD6nTO8IQ6eHozADs8GSPuTcL0eWHG3I
-9v1DZq2xN/9dPyqctZXAtm7UPU2GgPD2yntHlYZUdRhCyp41J1YQp4MA6pvfBRyT
-E7gRqU0rWzRJF3QYPMBL4+e3iz55GkCat+PZVwXU/QKBgAPJIHNaKW1njaPwtseG
-2UTzpkctIMg3FWK1ArNiGZ4ZzJ/iFojZE5Ux6ll8DuSbbECxNPkVF+Sw2ljUXy1x
-UElqmJCPvPf/sNiX86VrxxxRBvXleaeIOqnWSiNwCyLT2Q3cEsbxOYQXwnhhuEOp
-1VKpb0ervdGaUFzMQ3SqyWWM
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDN2cvBMdEY/fXM
+EbcoAJjUZZGx+bCrdb9DBZJFm9J4uBZqFPtdw4XlIYUBp+0pw9NKIdLIwAhAazHv
+9pT4bBSaKYeR7gwMCwXIKGNCFeOnzijd7GXCqVeqZKvDKhhMA/6QA7K35vwPy8gU
+ICMrjThyuNDQ9BOokPN9dE7BEk8dB/DIryde2ESzjVih+8gD5xDFTe86tKX4gJK7
+5c7zCIyXGIIeavg0eQ63/pM94eCtURumlBLMFwRBeQssBhsY22zHz3FROM7WIJHu
+leLgnYI0aeP2LHUtu4f7er12IpeT8gEScnr6r/4PuThK0IvyfbMwatmSowNY+SiX
+8Opd3sffAgMBAAECggEAZ1naOv1QBkQ7jGG4m5TxJnJ4W+sJEIzlojjB38EEQyfp
+2Qj/y2vHZesWZGZzZGy1QJsKwU/o1K79O4WJ9dAN5/jB4DqsQb+m/3+Hlj53NmED
+k6iEzt9G7H3u3uXbnQ7EWHoC2OJX23Zs1e3suokEtlSkQsTIufFMzsf+YmFW5zOW
+Sp3aV/RmI6I0HaWKaXdeKyvOJleG7xujWRhb+v2w0B808vinziAQ7EWx9724Yyad
+Q217Tk4McQt6SYDE89GZQAGPU5bxE+KxtRWGjkZjZjzpUT1Ca2FYm1bmn9w7fmmi
+riJwYg3hgKWqrZQsAiyFaQSnfAHBL5SknKDYOudAwQKBgQD2zMTAnvrEza2czz8m
+ftP1JYYT3LwyWdkvCgvTLVssO2afLCQVGN9lD9i+61BYcyuWLFxHjYFoyZHrtuzR
+3N8Kn4K8G0tYbp/lcWEydDYGn2c5x5ZFkyHpVwaokGxhJM6Ryf7vBELvCFnjoWzs
+vQqo2M/cUmeeAWSfwkQiKmvwKwKBgQDVhj8izHKlPiFGr37DI1djzgLKHAMtq7Rg
++0SQYRTqvQpqvaObCpv5Gw6AoJ85bCJn9X9RyBDTDqcczfTDIUujz1WdFUOhhArO
+iXkwXN4aMznwV71OuEKTJ0oNn+qoAS4B3StLsbmGqzLtMNYCf0ONoQjXhv28eFYS
+0IdWRQE5HQKBgQCxZlc3Lg+LT1ywDriR7zBkUmih9lTAzJy0l0AQt9WFYd1OJOh6
+0boiZtWhxnumWILIG3Lpa+nNbLaa/I4V+/HHLQlINUZfdoUw4+K95ZLAB+ynN7Po
+pu4FUCzVFZsR6OcjHFNHc4S5VQNoACzPrsLuw4oGEKWyicXOoFFhTaTwqQKBgBAi
+TLL8b808nJcSu6lTCk9oSZ2r8DGLP930HoIqU8JI9niKS8pzSsXP3ZCqkl1sZk0k
+p7IuQ9cuzEHI7i2wXl7KU9XCvZF+wLJKgPW+jqm8JSyb9Jn2IlGsLlRJORnovOk3
+JQZbL4GVfgueHQ2jQB97g4eFk7aiAR760r8aB7WhAoGBAOyLXWJQ4nA2+8+Ha41B
+wEqM1GZFYRXQugOdGiLk+WG6Uq+tlwWzHInCgKsKuBD6VaBamyVDM8jusFBst9a8
+moBWXcGlT4wDWrXAie9vDer/DVfpW7MKEOryeqF5MaIeC5we+uDjQyJ/8N2EJZjf
+hDkZNGMnn1B5rqbbcW9wOilF
-----END PRIVATE KEY-----
diff --git a/jstests/libs/rs1.pem.digest.sha1 b/jstests/libs/rs1.pem.digest.sha1
index f92d4597b5593..53d6877006a4b 100644
--- a/jstests/libs/rs1.pem.digest.sha1
+++ b/jstests/libs/rs1.pem.digest.sha1
@@ -1 +1 @@
-30A30E41279453CAB7BB93E267138AD4BCAB4F03
\ No newline at end of file
+26ED3E3468A6AFAEE9EAE9EC1A19FF67621CDA81
\ No newline at end of file
diff --git a/jstests/libs/rs1.pem.digest.sha256 b/jstests/libs/rs1.pem.digest.sha256
index c2cd2b7a388ba..11ab08011442c 100644
--- a/jstests/libs/rs1.pem.digest.sha256
+++ b/jstests/libs/rs1.pem.digest.sha256
@@ -1 +1 @@
-2BDA71253E3ADEC8A74B7B1B8F4EFDBDFE4D89B7DBD23C64AFFAB318F28D99C0
\ No newline at end of file
+51B1A69A8C044A301A521DF56BF8AFEB7A1804990BFF93D6BBAF498925CFC5C3
\ No newline at end of file
diff --git a/jstests/libs/rs2.pem b/jstests/libs/rs2.pem
index fd0ab632cd87c..237f0f26a83c1 100644
--- a/jstests/libs/rs2.pem
+++ b/jstests/libs/rs2.pem
@@ -3,55 +3,55 @@
#
# General purpose server certificate file.
-----BEGIN CERTIFICATE-----
-MIIERDCCAyygAwIBAgIEV+C4qTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIERDCCAyygAwIBAgIEEPb6fjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBYMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMjCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBALFJkRAZB3R8AWw34dm2bLSpnlybSF6ayRmL+8kn
-I80BFFsH5sB8oPuhtqOslg0sJzemBtZVEv5o1zvi0pJ1KaifhPzRnwFLdpiWMmik
-0Wjz3HSh4gepKF2it5iojkwkXrKKL291RXfybO3xKVMO+AiDg/hYtckAdJqZdjNJ
-3tfgo3n4EKsgYc+EF5ycmERhGdvBhsuFMkakaT4KQtldmTxx/TD4NbH2pmp3G4yp
-70KEEAWOt5TowE/VD1WEJJwA0BxYTckGgBLCXbKUSk8gnPXkM53WSMlTc5G/GMc2
-GMjFoxudfVHNiobLh9y/X/PvlpRfQlvCrHB0nnnaztAz2UkCAwEAAaOB+TCB9jAJ
+AQEBBQADggEPADCCAQoCggEBAPghGUGLksW8CIMMEGg5NBJjQk7BhTBD4MG5wFMj
+k7q+eS0F6ICSn18BoquSzDrHQ3Ts+Y0hQ1B2lexKI9TkpMZgi6Kxcn0BlSG+z4bI
+9AN06fykYmkPwZz84tX6lnB+UDkRXYZAbdDBO3s6qjhw61Az+l6Cbe0y80DIrXWE
+9XDqxsN/M02stwrgECr6oJaED+R3vpDlDPA93RpvyX3zSL6n/SHwRVnlqejS7W85
+jfF6fX7iM9qw29oS2czoO2gHhh4UPkQNNqYhgoKuYIAyrIqw4ARIfQGSKEYNSXzB
+Exo21AMkwaMgJXfjAi8ErIW6Vpz/0Yb+s9wdMLU6nZGHQ20CAwEAAaOB+TCB9jAJ
BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV
-HQ4EFgQU/aaFMGpvVWcpjxvMoih9yt5vOJMwgYsGA1UdIwSBgzCBgKF4pHYwdDEL
+HQ4EFgQUiOwt8xofVJDvI9pbVvESreNCVYgwgYsGA1UdIwSBgzCBgKF4pHYwdDEL
MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y
ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV
-BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
-fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAvZKEFmva83RGbjwJy+RwmOnW7UjxUPIV
-ztp595BznO/m07XzqeB/eR11YGF/O6UjW1tH6pIs/tBB/Pm7GUa8xzdxf49b+I2d
-moEOTNrplot4ssQdqwoX1TaGxcGvK2iIgK6fn6nZJh1nEeafISfMBc/AnR9wNZm2
-Aw4xiXuZp5kjBc4N+wPzgZKck6Gq5jSsFb6i4QYNzAfcFsED5BI3PIoqEUcIFOwI
-y2Aiei/35hCT4ZeBa3q8EF0xEEaWHF++aj2+3LdfcGoYvCLBYKuVwW9H2iDNVqQE
-F3kcB+40hMm/OYmVE1MPNG8BCjBBC8n9Q1SI1JDAIvVc79vcajVKtg==
+BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE
+fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAaEOmcrRqKBSf8cn76sGYl8bhuhGa5AAM
+84f7UkR4IQkZi8LylXpuiByd8Bb5uzfkfesE9kNYlx/aI8+wYg/di5wbkjSpsYyA
+6hWBXGX759S9BcosnbAvuExfhmaZAd3j1V0j7K4Mjjs7vEdeZQaHpJ6WoXGnPmqB
+cWqlLNYEyxwFG4+BbIlmSsZ+xNF5f0eA9P3rdj0CZNHVVRbYUzkqn4FcayzOhENa
+UVvDpssD6NLCwy/cMAn1ePc5AsRpvInR9SMl81IX35QT8rb9YbOGLCCyCbUQL5W1
+xDMhFv2sNhGZQRrBVB+ftMQ51nN60p6JBu9OBFmIZPuI1FvkDYjvvQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxSZEQGQd0fAFs
-N+HZtmy0qZ5cm0hemskZi/vJJyPNARRbB+bAfKD7obajrJYNLCc3pgbWVRL+aNc7
-4tKSdSmon4T80Z8BS3aYljJopNFo89x0oeIHqShdoreYqI5MJF6yii9vdUV38mzt
-8SlTDvgIg4P4WLXJAHSamXYzSd7X4KN5+BCrIGHPhBecnJhEYRnbwYbLhTJGpGk+
-CkLZXZk8cf0w+DWx9qZqdxuMqe9ChBAFjreU6MBP1Q9VhCScANAcWE3JBoASwl2y
-lEpPIJz15DOd1kjJU3ORvxjHNhjIxaMbnX1RzYqGy4fcv1/z75aUX0JbwqxwdJ55
-2s7QM9lJAgMBAAECggEAKengl9OzBYEDvRgBFz2fuZ6YSACWPXeOr7F+l3Hfbuog
-a90UWtGcJaF30n5NA1Q6+VcRKr72PJuAtzHK4sE8VhdQk4zjcKTspuprhH3i3FM6
-/YAANhtx5aFAkqDW831tvfwZdtpc0BzKT/7B7FcPoMOnAaHHaHvpFVvOrBV8z8Bx
-Y06fs7QJH+R9DRLK48hNQ7ezuzSdOyJM96UGdAH7qt73tFBUkRA4WHjMBuJAnVSd
-hc+Qh2a39i6SC1hAA6AtNwi8QD6C7NIvdZwisVv8cuSFyJc53kM4xbGzm1FopZ3m
-QmDyJFhSatCH8IVdbwErDixziQkVc2I5DGCXShf6mQKBgQDZ9NKGceu/pxcC7Qqy
-GFHWv22MVRiIaA0DEM4lFsbhpEtNA4cKVLpFjY/jzZvdIePPPj2OvHVxssrGJJhL
-eK/WP2yUaWQ44wIqNnsns76UnCTreBLNqItHNlggV5tIYFIn1uHUeCg7HhRsCwRq
-Fh3WMQ+z76C3GF5cu9VDwwd4mwKBgQDQO30xGHZYqrqnrWyw5NxjdiDmWzCMIhO+
-+DsOpDaPuleeT9/bEyLHOZvVcUub5i6Y1itZtAOgFnBbuxPozakPa9/fbN9TdQ+a
-jj8hyggFZSG2PDexXQnAJwSbXT96aGi1/CVtnLP4N6kmtO0wjGLq3JggyZLl+G1K
-UvYux0MZ6wKBgQCqxykaDTNSlULzmPaTe3jMkmvs1FSHtTfU2hscdo9ZCBm0e3oZ
-PzBBBV3kehuRlldg8HzdVY+UiipWPSBTqnnknwed9kKATGdK2it/fVhsKzjGg+v/
-1vtizhMZLGvQtyBIdRe4GnozcCtCSROpaDDyvrh2HeHI9UAi206MbMly1wKBgDqK
-zwqTfwo0jZ+AkVM2NIO9/UfmEUMEfZqt/SSDpFdKI0H94Midm6R5HMeoP4KH90e+
-xpdEldRXGqWfddx4nXQZdupAmJTFD2r7XOJqA1FI+m5ahanWp1wfXBs13xfR7MZl
-Kjyj1rENLQAV061XeqPe+uIU6bi/3DIOGupR2RqbAoGBANc3mLU41tA3mFbWhifP
-30XdZTMffoc9iaQ8GdgceYwZc9WvOF1URFweNdUuRRRup/X3yPanYxjBs7hhpY13
-+rIOClCvNJ53eZZwL/mXTMCtIpJ5B2p7dIHY/N1UOLiOMNC8wJMloqPL8OTYZUmv
-Rqmx8CbeoJMcMc21Wbls5RX3
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IRlBi5LFvAiD
+DBBoOTQSY0JOwYUwQ+DBucBTI5O6vnktBeiAkp9fAaKrksw6x0N07PmNIUNQdpXs
+SiPU5KTGYIuisXJ9AZUhvs+GyPQDdOn8pGJpD8Gc/OLV+pZwflA5EV2GQG3QwTt7
+Oqo4cOtQM/pegm3tMvNAyK11hPVw6sbDfzNNrLcK4BAq+qCWhA/kd76Q5QzwPd0a
+b8l980i+p/0h8EVZ5ano0u1vOY3xen1+4jPasNvaEtnM6DtoB4YeFD5EDTamIYKC
+rmCAMqyKsOAESH0BkihGDUl8wRMaNtQDJMGjICV34wIvBKyFulac/9GG/rPcHTC1
+Op2Rh0NtAgMBAAECggEBAJujh0gJ2GKXc29f3drCJttxz2G/GWAQ86YQcYa0xjFY
+0IFhN1mOntDlIbglOF2WJ8Xr7wyLVMHf4GJ6jsc3M/QLEwtV6s5mc7jbCyksW4Lf
+jvWd1uy3qWZjaZV2vW6KjJ9/OP7A4yESU9EUFzmX+sIwGFe30GP8lYCJX+RbJelQ
+3m8QwXVnsBuc1bBA5LPnmncNE4jl1O2sn+CZXLVoAxAsfr9D3STsUYREIBjxXMLa
+ekqG7JdYvh4pBoTBmeeibsCM+icGOJsTbsXBr39aKXXMoxnXRrdPr+VwYL1nm06v
+WCCULs/CQOt8e3QpwARqAuVU8KrP1CkHMiVKTTNcXgECgYEA/1gmdDegIU9Qodz0
+W2FJYMmjWhK0EIT/cv+G+4L+swnjqXjoVeSsao/NnVoxZCOJC9iQ5jyXyPyJOrB3
+6wvHDhiPA4BSgOz7Pk9ORcRSam4iAF5ayQqyBBecqNjVTG/mpK0k8r8VCxhTcVkw
+eyWHvils9vHyLZmrQ6a86lnI080CgYEA+MQ0pgnC6qe53pE9kIfg8YXyC1PpG/uj
+Km10EXgZRjeY0NWg57S8KMAwDlzpvNJoIgf1CsUZQ4vc8PRId1yZNVGlpUE5OuJM
+8KuJ0iPD59aF5oIEt2LFSxV0GfDdpctzsGUkgR9Fz8gsjh22LNqEmn+IDiwf/E17
+8ek5DroJziECgYEA0fcoV7RN5lYUFaVdAa8z6XSk8DIlAth6PZPE6RhEW/Cr1fVo
+BwkshcuN5e/+YTufO7BvJgN5RHn0VeA6G3rpybuEDfr83KsBxbDsyg2vP3kkWG09
+9cbnrtnDpkv2yQ3S+GKv2TQrF55/LtuKNBkeT848natB2yFbZRu8iHAf5ZkCgYAv
+ywpOaAuxp323O/I3BVP7Mv+4m2tqu+KyMQsenBNHAUFzxcc/HOMVdS8GY2VoqIfn
+Gd5DLnDXIFTXWidd/0VUEBah9DD5liawClTVMZfev8FD8vDTBr/e2AVRQoxi4P6e
+AiCMSiTQcaXVu/GXFnhpcl0CVAnlIEHSzTrwEF/34QKBgCgXp2WGZ9Fa6akH3pwJ
+VgmBGbiUH/LMHj7fmKreprL3p9T+7tkfj28+NBuRdDh544A+SYmwuF6gd4yu6WIw
+EW7VuqiHeP785pxSHrUvwav5ja+ii5uKl2qgCtkNRSQUFckZr+1CHfdGbeU4CybU
+rV8BllDApj8GdVXJNUspBPCf
-----END PRIVATE KEY-----
diff --git a/jstests/libs/rs2.pem.digest.sha1 b/jstests/libs/rs2.pem.digest.sha1
index 04f9431cec5a0..d417978e2cec3 100644
--- a/jstests/libs/rs2.pem.digest.sha1
+++ b/jstests/libs/rs2.pem.digest.sha1
@@ -1 +1 @@
-A78F67FACD85FC28B02AF28515C1A115058C2DC3
\ No newline at end of file
+D5001B67C0300AE043ABAC72FF5B20562F1845A2
\ No newline at end of file
diff --git a/jstests/libs/rs2.pem.digest.sha256 b/jstests/libs/rs2.pem.digest.sha256
index 6017cff83691a..45d2cd2ce7295 100644
--- a/jstests/libs/rs2.pem.digest.sha256
+++ b/jstests/libs/rs2.pem.digest.sha256
@@ -1 +1 @@
-F993BFD896A91AF8FFB082B7F13C20E1F7C693B436A9D257B9C236FC508734C6
\ No newline at end of file
+2BCE399B3F536417B055D303378D8614EAAC1F2B285A3EAAE3DE3C9D4479A815
\ No newline at end of file
diff --git a/jstests/libs/sbe_assert_error_override.js b/jstests/libs/sbe_assert_error_override.js
index fb70a8e2337b3..8f752e3088add 100644
--- a/jstests/libs/sbe_assert_error_override.js
+++ b/jstests/libs/sbe_assert_error_override.js
@@ -158,7 +158,17 @@ const equivalentErrorCodesList = [
[40393, 5153212],
[40394, 5153213],
[4940401, 5153214],
- [40390, 5153215]
+ [40390, 5153215],
+ [5787902, 7548606],
+ [5787903, 7548606],
+ [5787908, 7548606],
+ [ErrorCodes.BadValue, 4938500],
+ [50700, 5156303],
+ [50699, 5156302],
+ [50697, 5156304],
+ [50698, 5156305],
+ [5155800, 34473],
+ [5155801, 34470],
];
// This map is generated based on the contents of 'equivalentErrorCodesList'. This map should _not_
diff --git a/jstests/libs/sbe_explain_helpers.js b/jstests/libs/sbe_explain_helpers.js
index a405efc1ae54d..92d27adf5b947 100644
--- a/jstests/libs/sbe_explain_helpers.js
+++ b/jstests/libs/sbe_explain_helpers.js
@@ -3,10 +3,9 @@
*/
// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js");
+import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js";
-function isIdIndexScan(db, root, expectedParentStageForIxScan) {
+export function isIdIndexScan(db, root, expectedParentStageForIxScan) {
const parentStage = getPlanStage(root, expectedParentStageForIxScan);
if (!parentStage)
return false;
@@ -27,7 +26,7 @@ function isIdIndexScan(db, root, expectedParentStageForIxScan) {
* Returns an empty array if the plan does not have the requested stage. Asserts that agg explain
* structure matches expected format.
*/
-function getSbePlanStages(queryLayerOutput, stage) {
+export function getSbePlanStages(queryLayerOutput, stage) {
assert(queryLayerOutput);
const queryInfo = getQueryInfoAtTopLevelOrFirstStage(queryLayerOutput);
// If execution stats are available, then use the execution stats tree.
@@ -46,7 +45,7 @@ function getSbePlanStages(queryLayerOutput, stage) {
* SBE, then plan information will be in the 'queryPlanner' object. Currently, this supports find
* query or pushed-down prefix pipeline stages.
*/
-function getQueryInfoAtTopLevelOrFirstStage(explainOutputV2) {
+export function getQueryInfoAtTopLevelOrFirstStage(explainOutputV2) {
if (explainOutputV2.hasOwnProperty("queryPlanner")) {
return explainOutputV2;
}
diff --git a/jstests/libs/sbe_util.js b/jstests/libs/sbe_util.js
index 90451a646f764..02caf007d628e 100644
--- a/jstests/libs/sbe_util.js
+++ b/jstests/libs/sbe_util.js
@@ -12,7 +12,7 @@ load("jstests/libs/fixture_helpers.js"); // For 'isMongos'
 * If 'checkAllNodes' is true, explicitly checks if feature flags are enabled for all
* nodes.
*/
-function checkSBEEnabled(theDB, featureFlags = [], checkAllNodes = false) {
+export function checkSBEEnabled(theDB, featureFlags = [], checkAllNodes = false) {
// By default, we find that SBE is enabled. If, for any node, we find that the classic engine is
// on, `checkResult` will be set to false. This is done intentionally so that in the event that
// we check all nodes, the effects from previously visited nodes will carry over into the rest.
diff --git a/jstests/libs/server-intermediate-ca.pem b/jstests/libs/server-intermediate-ca.pem
index b9665630db88f..b591a9a91bce1 100644
--- a/jstests/libs/server-intermediate-ca.pem
+++ b/jstests/libs/server-intermediate-ca.pem
@@ -3,74 +3,74 @@
#
# Server certificate signed by intermediate CA, including intermediate CA in bundle.
-----BEGIN CERTIFICATE-----
-MIIDrTCCApWgAwIBAgIEUjJhqTANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV
+MIIDrTCCApWgAwIBAgIEH05N8TANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwPSW50ZXJt
-ZWRpYXRlIENBMB4XDTIyMDEyNzIxNTk0OFoXDTI0MDQzMDIxNTk0OFowfTELMAkG
+ZWRpYXRlIENBMB4XDTIzMDYwOTE0Mjg0N1oXDTI1MDkxMDE0Mjg0N1owfTELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMM
F1NlcnZlciBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAszRjtWB25WU9jQus2tifRDECbm0S2dr/+JNDN+af1gyTlun3gHVw
-tWTwtln1CnKoifoiAnN69RW43Z7Y3/s6OC8bH71QWUpwPAEoIkQqj4eTBTCu0vbR
-lHD5asmr3zrtcyS4Lp26CD1zFl+oOOW3rRZpPh6qy4/OsYUUiNEnSm1rIRRLiMMz
-LJLRDBQTU+TJW2HSM4UzwlqGIblWGmHwtu7l0bv+n3cyKr+xbm5z9jTsi7Siox3P
-mwanUR+f9EjWcmRG7wCRqeZ9q5EX4nLr6AGo/1WWrn7kH9BXcY4GFjISbZsYJvGQ
-4w37AHboxmIeQlkGsr+GKYAi3zhuNFB/7wIDAQABoz0wOzAdBgNVHSUEFjAUBggr
+MIIBCgKCAQEAykbosPvg/KsB67glkMfKYwnpaE8wH66C9HHe98wA4hhYT08w57Dy
+vSu8R+to2YwkFPXQPsBrDjOeQ6DrOUumelvy7THcNBNlB1Zzdix2XS+on3dgFUXw
+lMqMHECwgOsMmFRq6fhkS5jINGW2mKBNqmfGmrqFd7YyN+dOzdDdJZufCTNvxQsv
+GJbPbXkFIcq2oEgZgHpAAYQJSV8cKeCVMGvgexPKPEBasmHww0ouVEBFPKckE85W
+i+fKy81KhYtcFJluSZbfcaaOs65Ka5Efbyh/BHMgBPuMdYBZKRvFCYJ9xlXA0tEo
+u2XAXDBqKpvqnNkoI2uIbeuWUIf1r/UuQwIDAQABoz0wOzAdBgNVHSUEFjAUBggr
BgEFBQcDAQYIKwYBBQUHAwIwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0G
-CSqGSIb3DQEBCwUAA4IBAQA0u5Ryv/jMBbnsab4pCwVkY/r+NhY9Cev+IQbOP/op
-ig72quZP16DLP0NwbSo/nUPlF9UFO1q1qVTLukOqANmbmYhfkbJJyQl8MBiaV3vg
-kz89dwBYTgiQuiKm2oU9vGDV6uEOCSeG6e+vs/pdcqaKXif2Zt4Vxv2vbxKWOQRI
-hMBvgCJz5I8JAcH4fTHdrj0cwvNPphnZ0gzDIBL5E2wiluna/fh4M6lnLiztDraf
-s0ddchZ2lRHsoz1JbvCsAg9xUC0+RqQA+yMhfNqf8CxiMtBAIaEc8hNOvuSCbER4
-jZ0vAcXWXtANizGV5XKczpPZCQvNetnDGcC3+6dPauAD
+CSqGSIb3DQEBCwUAA4IBAQBmbBYSYbX313xvzXXwZKW2ee+5/fs8LKNSkuxqoDv0
+gEoOPnx7UkC0skVAsYI+EvRuJ/yMxg1+kRfNvo3lLffXaIRXnLFSQ00EMBE2kxm0
+p2mRv+OZd7eaPuXjNaO9kSfWR1PdgZdOjYnGqhekZMSQbsC1KTlVOk3UozUvGV5H
+OTJPhkdRAYNC7rkdu9xjdHD+kwsgdpwz8QVbTsPaUZcopNWmiJJC01XUsOmPDWdJ
+kwlorBzehDSOqLKfqZYpjuyWeaPMyBYs+0pxNoNs+M4476zqGY4ny/nCmDmj9VE+
+AhHjVyna2Ur8ifUn0OWcWe/bvgGxLFTSbntQMkIhikwT
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCzNGO1YHblZT2N
-C6za2J9EMQJubRLZ2v/4k0M35p/WDJOW6feAdXC1ZPC2WfUKcqiJ+iICc3r1Fbjd
-ntjf+zo4LxsfvVBZSnA8ASgiRCqPh5MFMK7S9tGUcPlqyavfOu1zJLgunboIPXMW
-X6g45betFmk+HqrLj86xhRSI0SdKbWshFEuIwzMsktEMFBNT5MlbYdIzhTPCWoYh
-uVYaYfC27uXRu/6fdzIqv7FubnP2NOyLtKKjHc+bBqdRH5/0SNZyZEbvAJGp5n2r
-kRficuvoAaj/VZaufuQf0FdxjgYWMhJtmxgm8ZDjDfsAdujGYh5CWQayv4YpgCLf
-OG40UH/vAgMBAAECggEAYdyROsJkC1+fHkAq9BPp3aZZ6o28BqZWtJtO0N8rmAuk
-KXy5QThhBV4LKbm0XDFDIp4rJmWD1wU2wCf3zVD3eSkYSdvrXeeNpOcI0LWi/GCQ
-A/yS6/gHeWW8fvmE11Hpy8BYqHRP3CIakDRKvE+OX4JmlJrQsHtm54CCNzjomPZ+
-ijiL3RTXCyc4Jitt465MqI3AfKxPOIxIkblHuVU7C64qZVPHJE6qqeFuji7psiBt
-7N1cbcI0nj4ntCXhMOaufSPq4BXeRINEX1lrwyVXJZJhz3MEXF5+c2pt4Y42SJFM
-YEiBNMXzTuAfanTRdj3y1/Sut3YWRYLOA0l6qEZRoQKBgQDueaif5QiS8yWLWPiZ
-Nla8xL62bzxpxc+74U/OJ/RVKzei81+DKWhHo7uGsiJugTkaoBWogknaFo9rl4va
-KaOIltRsnNUrvjZi1CxhYovJEhkgVPBL97YO3p0XMl7m72qsDTfL8OJHZgF2sQpd
-0/wnd9vgjdFQjaHr0fnjgx6K3wKBgQDAX7JMhz9rzL3ig0rUZni5Ol4AsIAgKtNp
-+pXa7p+BJQEAVa4H2mquwLKa2t9yPXQpjNWGJebu8Mr+6FqaZyBejAY/793/7+Ww
-ZlkqxnssV7QbRK5EuzaY/4du09Z5quYCFjgKL8kO14vIFMR2GGgWfnDw7LWEMBXl
-zpDdz+y88QKBgQDIoq6OKAG4sLzrqiUtshvzoYvarWekjfqiVYPxLIhSh5O9kwjO
-ry2+6DBDuOdjFXFXx0uxhDxiMgzkNpJBMDsYFd4OqzxmGxhiuaPFI0X8Gy/slcm+
-AGC5ze1YsNZLcS7MJirFeJrH+zhMBdN08X76Lctd97MdFKwgXX7iPVSa/QKBgBlC
-98jw4bUoN68tCoCFzEiWj42Ln+eZeBWsoE0VlMCCuXTvy2Cgn+2+xDVtlHYN99kU
-810gUQkDecSrgmpQLIH95Tw0JXTcbc8SQZdKZRZXlgEWrcO8ydihhYlaLAniFT4k
-6Mr9p2tZhrOJdpsDbXe2tuIMU3G+VsHMtF1MVowhAoGBAN2iciuWzfnXI92yGfxW
-9MV0Eh8A3OeFWPel2qWKaQ75ehBO8EbBGQM8iLY7IXHVQkv9Iav9e5JpHhbFf6eP
-i9uPz5v91idOfmubGhVmlQPG+iA3yRGnys1opU+daEVyMaUTWK2z+jC6gPlMK/UP
-HQYfaJnYlK3rMz5btAv8FXNz
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKRuiw++D8qwHr
+uCWQx8pjCeloTzAfroL0cd73zADiGFhPTzDnsPK9K7xH62jZjCQU9dA+wGsOM55D
+oOs5S6Z6W/LtMdw0E2UHVnN2LHZdL6ifd2AVRfCUyowcQLCA6wyYVGrp+GRLmMg0
+ZbaYoE2qZ8aauoV3tjI3507N0N0lm58JM2/FCy8Yls9teQUhyragSBmAekABhAlJ
+Xxwp4JUwa+B7E8o8QFqyYfDDSi5UQEU8pyQTzlaL58rLzUqFi1wUmW5Jlt9xpo6z
+rkprkR9vKH8EcyAE+4x1gFkpG8UJgn3GVcDS0Si7ZcBcMGoqm+qc2Sgja4ht65ZQ
+h/Wv9S5DAgMBAAECggEABXwQuSPLQO6eGbcfhaJ1MWRGaOakxmcj897Wjd+BMqA4
+XMYn7FBW9Jwn+cc/S47KN6tGnzz2aicqJmlJl38en3i4yIeukdboyV8luFGSUAqH
+xvMkrs77q/0l+WojnwtAhyzazUdH6OjWnN0ZK2BFWtZ/gxiFn+5XxD5IW7pLFMbs
+B9a35zozJlmflj/skcyOIAn0ghvJuabwI768snnFW/HCiN9AhmkProMdN2bVTXmQ
+oDAh1ZTisWk4s5v9K9g67eLTJDrioS88c7X3Io6TWzVlqE9sczUQ4PYt+pMH6Asx
+yY3712U6G4P/jYQGJufcNv1ys10L5Kqz4ry4y1ohaQKBgQD/Xcj/9whM3GPkouTC
+Fn/mBVh0BjDyvIS0BokoMviGOtNutXD/3nDgzJypJMad2h0u7yYzI+K2WEhXZKDj
+tm7dCEorgN6FcHvP/R8DaGhcNcCkGScR0TAoAKf91791eIodRLHv14sr+E52PRY8
+2PcoG9f6mNow1y0nmRbqkDSKLwKBgQDKx2ZwrUtLCKgxi2fqoRmOZQuEC79QOz/n
+Phl0cCPeTpv6G9D6PnbdN+p8tEKicMrDEeV+csUfFBXauHg12gCUpAsddr6RBs4F
+T8hZO6xy3j/TLpAWd5LoRSbhMBSkaJ43xdCQGTqIrzHubGGJPcLaAf1VL45shPAy
+3faDZ/BcLQKBgQCKcoNF2t2CJj52N6ZEX8Rppd5F0RF6mKBtpdl4lOHOjFgS/oXM
+AwI5rlUFEu4nqJgH2RphwYPpjkVyNBlSO5cxeIwVt+FsgQZvRfEpfNKPo4jUrtpS
+u5IbLffmFLE1c+uVYKgDu101soJ/cjD7PjoJYccfkv1AW3icrlARTateewKBgE2y
+P28XB011wLRTHPsKxTcJQnNS6Pf968tXwPsbaLLqXdtkQxtPSc4TebZUf1+sZhR+
+S/e6VAtHb+RBYFYJ9MT/Yf7lG7mH6PKiEjsWoUnWHJB3O+BP6qsyq+YGvTINn+nw
+0qwT7pimwDQBtRGX54wOzRmRwjmUalCjOaw57B/1AoGBAJZf1KOwvz3JZAC6GU78
+9k+vXNaGBRF+mSFFZ6k/p50uLFgI6H7Q0ueFIezbatN+DOZCXpomCv3hadp6iAE5
+YUnfESTXw4mvB+aCQzX4gfkMw116+xpYirWJdZ59u2xNJPD4Q9yDOekHDqNl98M/
+tt3x6rtgvMgDCQ0lH9rQk9jO
-----END PRIVATE KEY-----
# Certificate from intermediate-ca.pem
-----BEGIN CERTIFICATE-----
-MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP
SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
-uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+
-oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG
-3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/
-RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM
-DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO
-O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
-DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi
-Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr
-eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0
-20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0
-zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM
-+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9
+12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie
+B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0
+Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9
+JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y
+j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv
+OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ
+e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV
+7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF
++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O
+D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+
+swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz
-----END CERTIFICATE-----
diff --git a/jstests/libs/server-intermediate-ca.pem.digest.sha1 b/jstests/libs/server-intermediate-ca.pem.digest.sha1
index 2bdaf850a79e5..21672d91b5fa5 100644
--- a/jstests/libs/server-intermediate-ca.pem.digest.sha1
+++ b/jstests/libs/server-intermediate-ca.pem.digest.sha1
@@ -1 +1 @@
-263036BD7986055530468D3980687114823C2687
\ No newline at end of file
+6FE8D7E03541D54ADB167B0F3F7C0C4062BBBBAA
\ No newline at end of file
diff --git a/jstests/libs/server-intermediate-ca.pem.digest.sha256 b/jstests/libs/server-intermediate-ca.pem.digest.sha256
index 077a772ea559d..61024d569dd3d 100644
--- a/jstests/libs/server-intermediate-ca.pem.digest.sha256
+++ b/jstests/libs/server-intermediate-ca.pem.digest.sha256
@@ -1 +1 @@
-57E7FB1DC241D6A3D88F71929D13EB828ED62F05307FC34A88CA712CD54D26EB
\ No newline at end of file
+EDBCB6ACD21542C3E8E1AD2A5B9F68049D8C6804038A8E4313587B56E4255973
\ No newline at end of file
diff --git a/jstests/libs/server-intermediate-leaf.pem b/jstests/libs/server-intermediate-leaf.pem
index 37913546525e1..58e088f7f3412 100644
--- a/jstests/libs/server-intermediate-leaf.pem
+++ b/jstests/libs/server-intermediate-leaf.pem
@@ -3,52 +3,52 @@
#
# Server certificate signed by intermediate CA.
-----BEGIN CERTIFICATE-----
-MIIDqTCCApGgAwIBAgIEJCpSyjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV
+MIIDqTCCApGgAwIBAgIELD7RAjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwPSW50ZXJt
-ZWRpYXRlIENBMB4XDTIyMDEyNzIxNTk0OFoXDTI0MDQzMDIxNTk0OFowgYIxCzAJ
+ZWRpYXRlIENBMB4XDTIzMDYwOTE0Mjg0N1oXDTI1MDkxMDE0Mjg0N1owgYIxCzAJ
BgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsg
Q2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMSUwIwYDVQQD
DBxTZXJ2ZXIgTGVhZiBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEAq6glhf+rRBoVKBl15cZ2HeE6K5tY0R8Ore5T/rat1IYY
-Lte50nw3LassVz/OfDWOd8AMEnReaz0iVy/YWWQQxZqgAnRYThfjEpN9IfyplzKf
-Xajg887hr28Sh57JLGbpBpfC3sODvPbAk12zdJlbtHNp94hYdatVPQiPaL94F9a5
-auQaKJQS/6W9GtfnQq20XMPFj5DEoybe4aqDkEXZBfa/Sf07qw0WcVvf6tzDYcH7
-6DFnUaZG4+NNkEy70Ckpmk4XyX+2DSdHd974AZfnb7aDXx9IkXExTPwTm4/6LvnF
-LOMTiCUJaU9kpibdLfmxdQqODt8cq7hCYhljd3Fc7wIDAQABozMwMTATBgNVHSUE
+AAOCAQ8AMIIBCgKCAQEAvuD+b/PUsswTUiFXvf+FQfZR/hPu0ix8QFJZ49hkvXoe
+JmjjO5/7XgP/tQXletqobTb6Q38PlIxvIpwnqGL1DSqEGQ/iwoSsroQbF/dtqKRj
+hKlNuOjH2sM/l9/aWl0oJPKFVBUfRlFIgwihwzyPNFbcqW3hlUQYG0VgG1Gr4QWy
+IQvI1BigbOTw64jgqjOotBxIkVb8aavfO+PpghiCwlihfuq5hOGTQcKvZIwS9JY7
+FKqK4j3rQ2rGNp1FmpJ9zjSb/GLxq2xDsGVpa21eIj3hHzbk4/TLTNdABXZI3Prx
+VpHQWUB71QCJiu78Ohha52K1yzX+7wmWFGlFmZtOjwIDAQABozMwMTATBgNVHSUE
DDAKBggrBgEFBQcDATAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZI
-hvcNAQELBQADggEBAJQpLZw+YSScjyL9tNlaoRMhevxoUvTJUiC7UWoLyNdIJxsu
-MgNYA+sHUgtCWu1m1cntLNkW03eZ5/HaiR9S9USyKmLkajG92cWYopvS924MsEq2
-D+bWlQckt2AyMaoCD4uT8Lo8RLlb95RmKeVsi/cbEFJMk7U2BxO0PaFOeEeNtjeY
-LG6LU90paMrWqw8n2zgiWaHFH7cG6fFKHadygdvzxvMFJu8C7djyYP/MjbjaodOS
-ayMZjZ7h1pnCHWZtO0Tb1onjbpykE6R6VFzCMNPWztRPntiRhhVIelWQfCj9PBTx
-ohpjzEZ9UGtqpEz8hJbDU4RF6aD0XQ9zSB/EhuM=
+hvcNAQELBQADggEBAEVPHstyEbJ306Rfe4EyxtWpM1GnhAaAtaCx19m6GwxGjxQh
+808Kuj8DtAzUVkD3pdVFyjDoB24Xz6nb6VlfUSOlNiqRlU+zDtsOvcpE/ipPbucS
+aMCpC2ObZ5iZvTHLSV1Qair5dURCJRxfpS8T1lvoRsIFs/oJ/uqUC5Ifk3LGy1GQ
+gd9o8DgWx2jQPOxjL6BZZFdcIkZRpFQlCspbrYt1kPauT6Eh9szl7O0TdiHPhBDo
++N+21WlnkbANi7yOvJUyeNkwmcrRSJw+rA7wSak7G5G+z2Yw1NbPPGX+0nigAG56
+dqe7mQvKTkfin+K9A1VZPshG1Ecf+O9nRrFHbZ4=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCrqCWF/6tEGhUo
-GXXlxnYd4Torm1jRHw6t7lP+tq3Uhhgu17nSfDctqyxXP858NY53wAwSdF5rPSJX
-L9hZZBDFmqACdFhOF+MSk30h/KmXMp9dqODzzuGvbxKHnsksZukGl8Lew4O89sCT
-XbN0mVu0c2n3iFh1q1U9CI9ov3gX1rlq5BoolBL/pb0a1+dCrbRcw8WPkMSjJt7h
-qoOQRdkF9r9J/TurDRZxW9/q3MNhwfvoMWdRpkbj402QTLvQKSmaThfJf7YNJ0d3
-3vgBl+dvtoNfH0iRcTFM/BObj/ou+cUs4xOIJQlpT2SmJt0t+bF1Co4O3xyruEJi
-GWN3cVzvAgMBAAECggEAZ3RduRbP16mImrRNlAA6a+O0NVfY/aAkLrt2sArVVULE
-DGdDvRHUB5gkjykLf1yf0phSLkUoKqWbrsRNNgLTAOUiDpikJ9zJpAZz4inu5AtE
-dSQ0/3vuFNdyaX5PbI1RYAHTFoLrQNXOVoKgh2NuG6F7eg4YkkKCitg/5jePX32z
-SWqIJEixlX4+Gr1t84wA5b6uoz6a3MT71QQc9pJt4l++5rCCsrT31tK9lxqq0wAh
-Ai38ZLEJlopz/4zexhv9JAI5YpQmCpLuRnXHny++AzaZP2IeUw7UeDkDSgaVCDbZ
-gEyD9FDAmOhENZxDVYXyQduNEPgKUkMFoCVpkXO/UQKBgQDY78jBAiqCIXtWITzX
-ctZW7ZUL8dyKoIEOEUwn6z3MEOgOM1thmbYFIwZER6LGVTeh8H7WUIC6HyRkevbu
-LHSXk1i0xRgZKBNZTcYNWsAnHA+w+XZsfqMkpguy0j4I2Kqcmko1cHjOWiDA0uMX
-oLQhCYTWpZNNQ/ZCofWGdoPehQKBgQDKkRRqg8bzLPQ1N8cavv11nIC2J8zk1ix7
-KBMO0sfbQMz9FfZEzrof99f4kW1ViM63cIDWfjbdv8fgjcqviKW1A3MkRcqhepvq
-k+X8VutD6u9lnUldkCIxeOYdYv5W66ctS+yrbc4imdahCgDJklhFTXM9F5J8wNdz
-92Sttczp4wKBgQCDwb7wWtxuhN9gBHfrcvfzfADWbGNCXxGC9caHSD0UQABw/4g0
-0yLWI9uehNO8Ge3cETE4AVTtthRoYravGZkGppttz3c/cDOwPahYXBHwrv5owlrG
-snNrwt3AEilTPiUBUERaCKGW0u6prmv24cgFeL6Rj6bTer+K4Ms6i6iz8QKBgBn7
-XdkDqgMK4w/oZZtin/Pz7PZwSHGZnv709uzR0FypBSH5LStKMMlk6sixLoroD0us
-HHpmmfb7YFHHHhZBuq9rD9u5L/JiXZlK+xbQt0Bw49/uurhLgndCAJIIXoMbmsfO
-kz6xyNzbSpJBTDSOls/czUkqutlRitEPDFTTmsLtAoGAOAyfMbNEvQk0y5sVRMqL
-d0YqYZN0kEHO20roTFDrMrpXsh3KfbLZP4izBz1B3NOEzdNuCCMVVfceTNmfms8A
-2/dh7FvDF6HJOMFQl2uvSHlouR9eEgm6zSRBORWkzG0f45qpvJMZBBpJzmB2wgVx
-rXc+yJjRTig/EqqVtQ2zBIQ=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+4P5v89SyzBNS
+IVe9/4VB9lH+E+7SLHxAUlnj2GS9eh4maOM7n/teA/+1BeV62qhtNvpDfw+UjG8i
+nCeoYvUNKoQZD+LChKyuhBsX922opGOEqU246Mfawz+X39paXSgk8oVUFR9GUUiD
+CKHDPI80VtypbeGVRBgbRWAbUavhBbIhC8jUGKBs5PDriOCqM6i0HEiRVvxpq987
+4+mCGILCWKF+6rmE4ZNBwq9kjBL0ljsUqoriPetDasY2nUWakn3ONJv8YvGrbEOw
+ZWlrbV4iPeEfNuTj9MtM10AFdkjc+vFWkdBZQHvVAImK7vw6GFrnYrXLNf7vCZYU
+aUWZm06PAgMBAAECggEAJD2dsW3PFX9vr6lK+nT4PRTibmYkct0lXiUEkiD0x5DX
+Bp3lft2aITiLJTiQYGoBjnLgw03tjFu5gg257duUAULwP73nZN1B2ASXDE7bECje
+CEMI0bHIuD1X5qMG1x9WzuUI8XTtMjGendpWtDXcTqirTrPhH7EFDrB2VdmE070o
+J/ls6xYUkaYYNR0CthugxIEnVj/8n56ecGTOi0l+QCo3vpW64QmKzz1R2OtqoF0t
+wWpvJT8mhJhJDREZnSYGvEojz5GGevpvmoPZYXd2WxvE6rFhA3nwmeuZn27Nk9Ao
+zxUdAH+eTqBA3SMwzqX3tjcWO1OdVi0PAUzv2BnHSQKBgQDgyxib7h1SvFcYF8aX
+sDpIceS7ySbmb9f65dKbHNqrqJXwGGpDxD0Uz3ehqjfSo6gZzteRQsVfI3VizAlM
+LHmzdXXxmuuHIGFWAWAV6GyBD/Xea9pzL/LQ7n9ldq5HUusFMXx8Q6GuUeb08MJj
+lJhNEHfG99B/6iRfamyxOY3eXQKBgQDZYJ2htLpb6WgOS1RXmh0dg7R/e6WGFzkC
+04rRBEbCGg86vBRAsopzXJXL6EPXuS6GB/RqpDekTednypKFKY1idoSFmcpo8tbd
+XqEszk6Iityiocj03T0VwVHVw2vAuBdbfEGUvEFm0rrJbYHgOeIZ+WyUdQHIJIzi
+69L0rjMZ2wKBgQClnl0CPXxTQbo1YQcLKWa3i1pH1JsZelu5WexCJg8iG/JkU8iv
+Jv0NwRFWBdBdHAC1CwUd3AEI6FoLMWLEQZxk0OzV1hsjkoLEV/0QBw8yQ60Vc4ce
+CLywnJc2DSu4FupbCCu8biPICEXOPzgpIAjZ9oEZEeoG5F5qAkIUjN/ubQKBgBRG
+p81qjyt9lQAoVKCrNmYHX/G4NdNRHTc/RiaC8JqwVZVT78utG67xBuPzMUjQ9B76
+ZPkBglMoFRqgUZRsMMzrYycXFkM9y37wkbYdNEQWN8hPsO/uJwF9e8WlqcpbIYqe
+B3v5J1yQ8W1ScpfYrldf5ZnfZCAoEKAmARJjoU6bAoGACQujARGIN3bPWrCgj2V4
+h3GahvEmLL07kuvy2cSOYfkLpWp4qgR2NEwiRe5kUw0MtHuBe5qilTXn7OnJaYe0
+TkNfahlL73dCfDjNQ5CMXj64P/SP0YoH/snPx7mejJZO1U/4UKB9LbDpJCoMFdyy
+9YewNWEeUhMiQzhMt2PEeY0=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server-intermediate-leaf.pem.digest.sha1 b/jstests/libs/server-intermediate-leaf.pem.digest.sha1
index eaa87a3e7ae40..bc662d3bb4f12 100644
--- a/jstests/libs/server-intermediate-leaf.pem.digest.sha1
+++ b/jstests/libs/server-intermediate-leaf.pem.digest.sha1
@@ -1 +1 @@
-AEC6D0D178627DADF56597264DA6C674498197EA
\ No newline at end of file
+DBCA9478EF39949988842F7C598633BC51780B0A
\ No newline at end of file
diff --git a/jstests/libs/server-intermediate-leaf.pem.digest.sha256 b/jstests/libs/server-intermediate-leaf.pem.digest.sha256
index 97242f0e438b3..38c57828077d9 100644
--- a/jstests/libs/server-intermediate-leaf.pem.digest.sha256
+++ b/jstests/libs/server-intermediate-leaf.pem.digest.sha256
@@ -1 +1 @@
-B87926861C8DD6C66214A91FF96A2403793604E78799FE55ADE7ECC90EAD3C33
\ No newline at end of file
+32C062F812222E0949A3B03FD347D30D479575FA7691AF6982AB520805EE99D3
\ No newline at end of file
diff --git a/jstests/libs/server.pem b/jstests/libs/server.pem
index 00def9a75d539..4cfd885c56692 100644
--- a/jstests/libs/server.pem
+++ b/jstests/libs/server.pem
@@ -3,56 +3,56 @@
#
# General purpose server certificate file.
-----BEGIN CERTIFICATE-----
-MIIEZDCCA0ygAwIBAgIEJXfWyjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEZDCCA0ygAwIBAgIEVx6CDzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBsMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjBsMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG
-c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmimB7418/JLt
-T3GBjWsiyO5RRApaTYbUMDibwHCJu7uS8WXIsXmmM0kG/KeiDCGJsnqJLDmgmmDf
-W1wZf9cqnUb5kg10J7UznPIyqG+LxJJYQTQmUNmHNcQienS4iI/lOrJ+9oeTHE8l
-hwL1trua7XY0cTQHbED4HEMRJ2MRrRZkLyFF/oIOLrM04ya+y5d9XrLlBf+8O2It
-cA93L0ZMdC1dJdWQVibLBEKT6AmeYa3BHr4o2jQbm/N7iW9wcKEZaf4oe1XHy5a7
-YkjzMaGIbtQA7y7W7hENOgVHR1jpHgbkIkiZWJiCfEJ5Voz7Fc0wfDTyh/cHkWSf
-pd0vxKgJXwIDAQABo4IBBDCCAQAwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD
-VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBT4ez2+qTaostXv
-MIza0HCaaXjqlzCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G
+c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvqWriorxaktd
+7eceGLGDiCJ9OM/5ogFclcMhx3+vj+Suafrxmg8viGHDh2Tmg4/2oOcjSO5xZck8
+vVEIlmThtNdULxpV+GVm4eUUn5OlqBVHsqIIwXkD/nQRikXIhrlmtn+F064DzT6J
+MsWkDTZFup2tj48nDamWdBKWInhApuMlGMg/FMRZnC9g+PvDZYHEUku3glFvXi7/
+sGUr1rD2lXe00B/VE648pEENaP+XwcvnyvWOenWEgwWQxSgAggBirnDBg/px+HPV
+isBILw+lDRGM6watcDLBYn2aokA1yC7saNVz1tQ+adUjFyF3kAYiVKjMUu8jNc3z
+uOekpDAEnQIDAQABo4IBBDCCAQAwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD
+VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSNsQSAJ0uVz49t
+fctZ1tsVa5gLXTCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G
A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoM
B01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3Qg
-Q0GCBHvUrJMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
-CwUAA4IBAQDT97jbSaovQZeFMBS6yvXEshb5WuE+417V7SB8YEkfSSmYzqhAIyD9
-28iQAhUqSiU8PGTTEU2g1BXAQ8E41IWhLWAhD/COhZ0p+c0pfN6tv0mIJ1lafx1i
-m5sVAanO+RKtXn8CsmI9OZA0Zc3d4+pMe7VJWNSeoKTFpsRuhKSHgT9UjsTdFtMV
-PoOcjm3cArmzNqJ7IRtSFm/MkYl0tm3NGqXJin279h/r3OalmKthWYXc6d5Z/Hka
-Nm3m/STSuaaL38Y9WudlpEXlvRGJ+VLLLJaDOQOZiOm+2uYbp9GDcbXxGHBhenqz
-kwdit0IT5BIkXpnF2sNGJbEPxiTx9JwO
+Q0GCBEreWhowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB
+CwUAA4IBAQA0x3Fe/xsvpigB81Tfj/ysBpTagXKUrv4DV1WM4dGJ+ffRU0iwF/vK
+tSowhGIWnpiQL8B+rhc11Rk22Vm2J0CrYIfQxzy7lTt8HjwupKlRcBId9Qo/ULKv
+YS8I7kFu55xDzi44btW5c/LsNa6gP5lfwGh3gcufSkwvGCIa6uLk3wHMpFQ0AxPC
+i6Cv+XdV/ul8CjfjCzPDo8hyyCkARLT0/M0OPgOERNbwgXUsk24ld1lq8pynaYOl
+4h2lS67X9V2QhEfNhzN0LvYXUXSPcPv4vORR0QhUPMrB61WtzQWLrDIWSsNUMdhx
+GrRUxoL1e3OBeA9JLp1V0zVrr/D0wy0B
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCaKYHvjXz8ku1P
-cYGNayLI7lFEClpNhtQwOJvAcIm7u5LxZcixeaYzSQb8p6IMIYmyeoksOaCaYN9b
-XBl/1yqdRvmSDXQntTOc8jKob4vEklhBNCZQ2Yc1xCJ6dLiIj+U6sn72h5McTyWH
-AvW2u5rtdjRxNAdsQPgcQxEnYxGtFmQvIUX+gg4uszTjJr7Ll31esuUF/7w7Yi1w
-D3cvRkx0LV0l1ZBWJssEQpPoCZ5hrcEevijaNBub83uJb3BwoRlp/ih7VcfLlrti
-SPMxoYhu1ADvLtbuEQ06BUdHWOkeBuQiSJlYmIJ8QnlWjPsVzTB8NPKH9weRZJ+l
-3S/EqAlfAgMBAAECggEAMVPaubYCKicxO9xL4d5IERcAdc8COhBMUOmsHJVl3OYW
-DJvO7crI/Vv+mlVhKpSIpM1EmxwNYZhhn7AGkLtebVjKfotDgXkffLaasv6OMGXL
-S4HOSMxx0ShjB9/VNA0nAfMfYO7ciZBhp6Owz7mTxyuteBN31JGtUg27rWirvUAP
-yHE+Y7wTxs3GmdgHeLPcCegh09jZ/yboHAgEZqiuXrwfqPT8kmPVXZ7ZH/38Quar
-jeuaii7SpZUwpJMMd6C5pPs3lcu5E/czQv+XJrXuNbO9yZ3ePk7c+MLGuWffAVd4
-77Gd51epWhQc90RlppxScD7GlcWR8OZyVxgSAVRTkQKBgQDGyCjOCd7LDkMhI/ES
-5gmUK+kbBiDDac/hGDhHZWAMLoVwJ8gMDJu2oU3D8S5Cq2F2nPxCMR75GP/JmU1S
-WtLSa0nR3NMvm4oKMRG628YiSaqN/ZiEHuURw1OMTpgXLq4kB17CsWcKUUQsfB5G
-t9C8d55dqjMp1/sCqiumNNV2IwKBgQDGiWimL/AnVKsx9DVviCXEsxdixQVCBGsr
-LMs990y2NStTslTXVUhH6R5iJ/GMlO9C24UymKiSZ4KyIt+F2B3n/xvnRUdZ253B
-jGX/vF2rr5mrnuUddWGbtZevWPf+rKMFHUynfpITCm3eKNCmUNOLOv8L9cOTndFj
-yFFNF1ONlQKBgBDxapj1OmowUwR2Hcwwk0xv2bSV9yDw2ekjuVhMib1AEduXaHOu
-d28/nHNBEWJZXTtQ8idqLGuq99JlILQOTb3lqysaGV4Lcd6ghBRbOy4c/U6Q+Pj5
-8Shsb6ib4lbHgcxMXIVXvaKhfqAUDDiW3lHSGEt/gFDmudxmSMYn5rHlAoGBAKrN
-gGipCVTs9LNvQ6GGYefNo6rnkVRwdLqaOOhAn5CKQZIp8++lgR/MoMUVVCv73bOJ
-JBiZWT1LLBeRBPgjK+dxhFH9OhtGY3wmC4JuTa/szKnnc3laiPTqz2qdgg1p/H3j
-7RSUQZt793WPa6IKG1THFrD1aKLQvVm49qj6DnnZAoGBAIfF/1/x+Cy/i5mAj145
-+uNgGkY/aqwNglmJbQNN+0rB9Lmc79ZpMLBp081Jd4yhySlwKw44maZkUuyHQ6Jg
-P0v0i/eyowYQviRJHEu4DRgCmHYLMqa5k9ms5orNAKLp+KmmzUEdRUIExuJ5HaF/
-cXPuG2QD/nsf3ha+/PnjVWwo
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+pauKivFqS13t
+5x4YsYOIIn04z/miAVyVwyHHf6+P5K5p+vGaDy+IYcOHZOaDj/ag5yNI7nFlyTy9
+UQiWZOG011QvGlX4ZWbh5RSfk6WoFUeyogjBeQP+dBGKRciGuWa2f4XTrgPNPoky
+xaQNNkW6na2PjycNqZZ0EpYieECm4yUYyD8UxFmcL2D4+8NlgcRSS7eCUW9eLv+w
+ZSvWsPaVd7TQH9UTrjykQQ1o/5fBy+fK9Y56dYSDBZDFKACCAGKucMGD+nH4c9WK
+wEgvD6UNEYzrBq1wMsFifZqiQDXILuxo1XPW1D5p1SMXIXeQBiJUqMxS7yM1zfO4
+56SkMASdAgMBAAECggEBALaMafpJ2qn+Kp2fPPlGGH0lESRyOEUA6sdYRiMBx3iP
+7tX5/JeYzNnZSuF9IyB8nBor2GSANMLS8z8PtZpUVK7VtH05yXIEnoPU12+JcAjG
+/5UzhBDFsKZYO2dcNIuUQZ6j8t/i8y73H2YnS1N/UTtEuikJWMrDRRY1qd6tqLJi
+i7qRNv2bjdqrEu7Rdhha75WxY7RQysLkGkexde7hFKeWOjFK1pjpn1nRHjTqylE1
+TX4cH37ihXzra0iLhiuLlhIF0dav8/bLNQgLdKlb1H3hEabk089p9+Dwm9YGejus
+GvtePgKHdr9I/6WhltDqxgBn4PD+p+dt4L8KWTRi0B0CgYEA6OweDotakEf1PwdR
+bs6K7tU78LZ7+FKGe25gFW97bQxvk84B954e6kJS74CVPDI8hQPZW1zk1PIN2e7h
+ntNby2I0AWB+zz4Bih8hO8L/F8oqzaoerh9Yd2ZZ8wddMa+lKGu137LRj2vZMEla
+4eeARmlDzpJMg/6u5gnYlzAAvTcCgYEA0YlHDDKK3GyXDoe5O69oiaHKEqSrM4PY
+NY6piHsD1a4ksLjtLUZ/fP2w8NVfV5M7skf8nSMhKMc7vBdu6qBN9eo4TfGNByZk
+nvJkrjDaXVYy+WT/5F+5K6iMcgS1Fo0Dpllv6AMCdDhRWk9fM0/irxwValQaJrs+
+mGcrcd0C1ssCgYBXFN/v5B3Jz31bQSsq2EGNJV+xkSgsIP5ya0O0/+cPUBTvF5gY
+sZ8xSfaj4FjrFoUV3eiHheVvz8dp8SudK7wn/+EdmqwOY0pED3tnUnH4vPbfyXiK
+9OEoUrXSbLBlARwoTozCw6IhktqyeNpnlxuYN4bIVl4RA0j0bD3z2FiOZQKBgFoI
+on6bOQ99mWqk5vK+bCy3WByqpOV9wWlB45bBROSL7zgAPek2YZFTcLQK+uymVwBD
+7keW0Ki08vXfG8m1F8qS2Z6kK/TmilXB0YEHfMyePUjsHQgEGYyo37AeVbFa5jaU
+N4F5yZQmns4vTLi/mqejaZBGkvYRftP9gK1sScwBAoGAbD0cKaMEp9rTZF2x7Olz
+2Thyx9K+8BDbHYfaqpj1PTRoyLYL5syC9FYE4FBkui7NsynyEpHjpvilJduC9oh+
+2/EwfNC75Hq5nJkGgk5KHeMMMz3nNCsGi78AW7ilsYi1hC277UX75UI92j70J+Jl
+0M1McDYiCVYgnsKMydJD1Pw=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server.pem.digest.sha1 b/jstests/libs/server.pem.digest.sha1
index 459245fc9c876..af47ee559d1bf 100644
--- a/jstests/libs/server.pem.digest.sha1
+++ b/jstests/libs/server.pem.digest.sha1
@@ -1 +1 @@
-B83F19BCEFB506CC46478A2B9DBEE2762EBD038A
\ No newline at end of file
+CF9D05702DD8002E7AAC8E9564420E9D4673B249
\ No newline at end of file
diff --git a/jstests/libs/server.pem.digest.sha256 b/jstests/libs/server.pem.digest.sha256
index 16c8057185c25..7dfe932dbef96 100644
--- a/jstests/libs/server.pem.digest.sha256
+++ b/jstests/libs/server.pem.digest.sha256
@@ -1 +1 @@
-FE8BF8840E44CBE2E04B408C3F49247E256A1006A4DD6EE9B74B5FB84EA2FAA4
\ No newline at end of file
+D466E7EFF3BF6E5F080F8B6C80D839B6D2F1DED790F9AC99C00A7370228481AE
\ No newline at end of file
diff --git a/jstests/libs/server_SAN.pem b/jstests/libs/server_SAN.pem
index 32422b3b27315..35b1c5c08f57d 100644
--- a/jstests/libs/server_SAN.pem
+++ b/jstests/libs/server_SAN.pem
@@ -3,53 +3,53 @@
#
# General purpose server certificate with good SANs.
-----BEGIN CERTIFICATE-----
-MIIDvjCCAqagAwIBAgIEJXwypTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDvjCCAqagAwIBAgIEIQ0eljANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB9MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjB9MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEgMB4GA1UEAwwX
S2VybmVsIENsaWVudCBQZWVyIFJvbGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-ggEKAoIBAQDJBVF8+yRUl2SUbW2GDTTChhRTwrmjqZkrpe65Gt5QIMqq0LDQMr0G
-4t1X0B6gIrLjpzB9a8Aty9PLi+Dcp+C9hC/Bm8W6GsnyqhzFAu5hXELMb+Zfj9tx
-Vv8KcFPB1AxTzpRPUfdncfcpBBKi6c9jtp3IjM/jLE6nppyeP2BQ0LY9xJE2tjVZ
-fSxZ6kJPEbY+QOF3DfSmSY06qhNt8T0EgLhDpp7UuoH3bq2pNrFzMnXIZW868PUn
-FwfFxYWb6+QWAzAVwd5aj4326727pFtwR9+SxvHZl5NcfJIVQQl6WJg5qtCTgwbZ
-uYfyLWVKCtNLxsutrf+ydfGQZOte+0XJAgMBAAGjTzBNMB0GA1UdJQQWMBQGCCsG
+ggEKAoIBAQDb1cukIOQBUsYVprQC8sIUrjOB7uZEIkAgFVd207erlA2Qq5MHWnnU
+OcicqLRLITGVdfS4fSlsu0tnCwSV9Tawo/jrskjITDuIewoOR+vDBlj6GDTygygQ
+6urmNTalafIN/WGpliEsk6zUDqYrAz/bPiiR4f9AxKrczkxkHfD7gM6rzdj+ukxR
+fMmpLUHJgh8pIqSSN6ZA7lnauB7NCI0RQtB9qspmJHUx2FZW5B6Lh+ZOis/soEmX
+OPy0ApzxxyjIjTjoNqWQOG2v8NHAIH2HtQRSVYbkZ8vTolD7BdezEECHAJGoS94v
+oGPMMJ0T/0Gnc+LlUAiyvl+1DnjDw3XFAgMBAAGjTzBNMB0GA1UdJQQWMBQGCCsG
AQUFBwMBBggrBgEFBQcDAjAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAA
-AAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAGhG3IwxfD1f3wEBZ43G
-GSIKbfbrXPuubELReKOwvN12m06EO5UQxgwZmwjBuH1HeZYblEDuYd7xaYxPW543
-2Vrsu1FtXD4U1OVazU3XjCalhcMj3loAWy76w5RexHbedIcd+NdgcqN98jlZolHk
-pyKxQGxdWU8lQvBdHCriOO/hQdPYiJTrR5Y+2rX0Sh/yT0KPD6N+BP2pd7NROVmr
-g9QCZC34fTDVie2useS/QEEj03Amw1HgqfczTEa1e8CaqelC0XQtb9OcKpvv53aO
-JXUsTs5uroM3yXoF8tFTLBs3ulY6xFXRAeJ2x93GYHp/8N8S879JdIIBUYhiwmWy
-PJk=
+AAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAH8uSic6SrFCjMqqS1xa
+6+XymvE/MjXyhARGI6DFrIZPBt/LZwzWr4RznSf3teOruhXvXZAHnaXAOnYwizpW
+XWzDev2xegglXM6N10dFWhy5M3zo2h91RPhc7MCkWcaHwMoPdKADVPW7sD8Ppx6Z
+cfMJbGHAM54XJU7c4RJ0Qs9Lv77+kLXTw0tjCvB7j2tCamTl4vw9j3UwxKdCC/P3
+WXV00yfTkayLEXlquPlNjSKJOXeDj0JoG+T3NgK973soKnztsH8aPq/s1kP0mUDf
+zLrQZRLCxBWAj48wN4h+/mG+EbTt+H2VwPbA9tQPzg9SlIPgY7ZPGSVbA/qCPcp4
+zkE=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJBVF8+yRUl2SU
-bW2GDTTChhRTwrmjqZkrpe65Gt5QIMqq0LDQMr0G4t1X0B6gIrLjpzB9a8Aty9PL
-i+Dcp+C9hC/Bm8W6GsnyqhzFAu5hXELMb+Zfj9txVv8KcFPB1AxTzpRPUfdncfcp
-BBKi6c9jtp3IjM/jLE6nppyeP2BQ0LY9xJE2tjVZfSxZ6kJPEbY+QOF3DfSmSY06
-qhNt8T0EgLhDpp7UuoH3bq2pNrFzMnXIZW868PUnFwfFxYWb6+QWAzAVwd5aj432
-6727pFtwR9+SxvHZl5NcfJIVQQl6WJg5qtCTgwbZuYfyLWVKCtNLxsutrf+ydfGQ
-ZOte+0XJAgMBAAECggEASQBdb33k27N/G0gCFkSFfH8ksqZstDrLHUbNQvu28HJ4
-J0BSdcl3TCDnMRSriowPWw1EVsfiqr7y02Cg8IEm5Kw0i6L+U2+XF0Ef4YwG9eSD
-farFhr6/epGVXT0dra5MK8NBqOyjZDXHBGYuPmuanSOceVBpzp4wkkG3buClIbOE
-f/NhIoc8odkPUMXICXs7iJM2SVh+2isAH9X0iIx6de7zqnJw1xLwcWdGx6X0KgmY
-SJ1JzUmhSDWdhAW0R3JoluzlEAUs71webtTMthp+AQuOC5wrRc0cARdC37foGC+V
-p13udR0bZyP09Bzb/BW2tqR7QfP7BiXObsxVjC8V4QKBgQD5AF/7kLYeLmpRVvJm
-cMs1KF0i3ZETRW5zQFMfmSYM0LmjyrmkgEtCEtHO5p2KMGtFZdvqkdS5/Zt5l+mJ
-aCwv27OsjPmO5VsvoCGBYOlw+MbnEim+cqOrjjVTNX91fH1ZxAcsp7Hmc09tu/jr
-8Qc8ZDzD4X6itsnI/tbYE8Kc+wKBgQDOq7X6AXfLsLwaq1BlYn/uljRMyrjE8/tu
-iPIj8xKT6KP0RkMqPl1xPT9q3J7gsxkVg28/MLBW2bnLV+dBKL+eGTVPw+AByqQr
-98FrpqKbARTmsu4Dn3vYgharpxrmg1L2nmN4WM3v6HGLdA5EPpQJvCKp2O29QCjh
-3ySH4vblCwKBgQCXwH++rFohmL4Y5nmCrzlZI0lnx0r0SLtgqBJAzrBe3RJWXWW4
-eKvlD90oUGow3wNxXvuhQNE5rPMFLu0YXhGX9TjSb3RkfymMo/XniK2cuTFXgD1K
-oUlYc6nSFWehrYYjoBGTSHxma148DXROLy3uw1Q5OQNZnTbrNUywkZJo4wKBgH8P
-ip3d8SzFx6AN2yu51zV0G0trBxXvepGGmHgJpU5SJRq1Z+280e4g3bBxWyyCb9WO
-LQMIiCGdUmD19jNVPhmRHfmgT0RKtYxikgQBOs9ZZuQ+9Z48mwONVyrJXfyCmKsO
-zdDbqCDrI4O9IlhKsPEbPaR2vhMwMvJLIkZ4/5npAoGBAOnes6P9drxmzaihJdsF
-1QBecczw5h5UaEp5KHfYwf+0G8s7tZeKelgHErPt6tivJ2sWbLNqxbvFQK7t1KNi
-kIRf1MN1XTxdSHuLVqHwuxE50zH3LGFE7c/ujszAiMbHct79s3/kcoUd5lkDVYyC
-x1hWrOgfx84p9Z897299r4fk
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb1cukIOQBUsYV
+prQC8sIUrjOB7uZEIkAgFVd207erlA2Qq5MHWnnUOcicqLRLITGVdfS4fSlsu0tn
+CwSV9Tawo/jrskjITDuIewoOR+vDBlj6GDTygygQ6urmNTalafIN/WGpliEsk6zU
+DqYrAz/bPiiR4f9AxKrczkxkHfD7gM6rzdj+ukxRfMmpLUHJgh8pIqSSN6ZA7lna
+uB7NCI0RQtB9qspmJHUx2FZW5B6Lh+ZOis/soEmXOPy0ApzxxyjIjTjoNqWQOG2v
+8NHAIH2HtQRSVYbkZ8vTolD7BdezEECHAJGoS94voGPMMJ0T/0Gnc+LlUAiyvl+1
+DnjDw3XFAgMBAAECggEASwfNWVdgepMlyH39MoRoeWZ7bf172gVWnZyrHYNlTMpb
+VuU4aWoX6rdOISnzXzEVG04HGHCfktzZ/3FjP0tSPze33bob3UEnkI1uATHK5eVk
+uPdKGvto0V1cjvXakNp4Iw44Jwl2iSBd/IKDdGrHgKzWa/QWiqSVLIe8yu7tMOlD
+sBcEYemnehTWipZLHLTa9iYog4bmptvwbzM4Kl4HZ3nYNLZzjpHfoZNWGtJMcZW/
+g7697l7rdar/wx4JVa0crUVSLNupigjToA9SH6eTf89pqi67ZV+eLt6tMkub/vVQ
+eHt1E/uXt5ydeCHxnjXArva4oDPir+3wW64qjkgIgQKBgQDumww2kZ2QdTc00ehy
+moYh3UfUn+Jx68NUlxsXOGR/E0PoBiHS3OEFLOuMaIbbDuiUvgHF+XTu+ddWed8R
+iN0jY2mjtrBMPUYPAqttLjVQEmpKPcWfVFARhJYMqWOksHII/l1yi6N9MEyzI6Er
+3sNH2jggqZR+2UInx2EGISZKewKBgQDr3HHtODahFRSue+3DPxliQH0SH++j4KzE
+rcfLi5jdODaUbpPcAsrIbRXnEW1Lf+S1QXKt38t4aR5h8gPn16CQQ2waqX4JDYSG
+frml9II25VPGPU0ve0YzAFfBvn0Ls63J3u7HCcnwV7lDSD8f5sZtHM4qtwLr9SEF
+pNArQnJsvwKBgQDSbf85aPay3g1QEgeUet7sosCkrlUA71IXGiSUN/G5eH4c9LuD
+wbTZ4aHi9JRqQR2xgFkEBlqwH7tf0p9+UVvlx9j0vzuAIVHWDx6sbWIrOfJvg0b0
+m2D58hp7FDCCn/ISKHK1gJ0w3RXnrvaHQDCs/7EcbTI/JNAJUPcqdrXUXQKBgQDL
+7oIO4vDRHGISb5Lno3I2Mp4xgq14G3YmZD+A7cWRWN5QPr/Xlg5xd2hdrwK6Ke29
+DsayMfNCvFkJxPC9kAIDWlhpQS15dFem1oF8TUodXvGtUSmgqUzMIjq+iQ6jhIr6
+Jah9LiiNh7vmwdvaoHXmt+ZGppB/JiaUM6nODZjUDwKBgHYT+uyrrfSvjQrEaeAI
+DSYMPZdvbf4BkDUmqFLu1aUUUUV3j+UrdYQBJZ3tkrq1QEqsogwmMdMfHxb51UBd
+btDEpoK0zsqzcBYnTX2fv9MfP8QzqrK+CMVExHwsTLRB3syG6vg73pmj4wIR3Ni5
+QRaH1E50tZVpdVVxhrMUOaBG
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_SAN.pem.digest.sha1 b/jstests/libs/server_SAN.pem.digest.sha1
index debb75113449e..72961501176d7 100644
--- a/jstests/libs/server_SAN.pem.digest.sha1
+++ b/jstests/libs/server_SAN.pem.digest.sha1
@@ -1 +1 @@
-6B67673A606FD2F43474BD04490F5F3C88EBDED4
\ No newline at end of file
+E10148B4BA6A0E2F5E12889AEA51622D89EC8524
\ No newline at end of file
diff --git a/jstests/libs/server_SAN.pem.digest.sha256 b/jstests/libs/server_SAN.pem.digest.sha256
index bb25a260fd3d1..6c5631e2e4cdc 100644
--- a/jstests/libs/server_SAN.pem.digest.sha256
+++ b/jstests/libs/server_SAN.pem.digest.sha256
@@ -1 +1 @@
-59BB5C306881B052F8E1CFCC9CE772E8273FBE4B381E4E268CD1ED9508DC88AC
\ No newline at end of file
+D73E8A7806D5DFF5CDAE263F2291EB2213E8D6E033306C11DC1EC43E5AC7F008
\ No newline at end of file
diff --git a/jstests/libs/server_SAN2.pem b/jstests/libs/server_SAN2.pem
index c63d5b76a5ce9..4146fe47ff2ec 100644
--- a/jstests/libs/server_SAN2.pem
+++ b/jstests/libs/server_SAN2.pem
@@ -3,52 +3,52 @@
#
# General purpose server certificate with bad SANs.
-----BEGIN CERTIFICATE-----
-MIIDtDCCApygAwIBAgIEIuh3kzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDtDCCApygAwIBAgIEMI69UzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjB9MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjB9MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEgMB4GA1UEAwwX
S2VybmVsIENsaWVudCBQZWVyIFJvbGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-ggEKAoIBAQDCauZxI+7aKcahn2sGQ0flmSrS9jHnB/r0EqQHdABWAzSeQ4yVuqmL
-2OqknjTBuEEjDhuPPt8EuOTxO5iWwoCTsd7dK1VS+rbq2CdrzwMhxKRzBDyPqANz
-pOrmb4UMW80sYkdFKpBlwxi+DxkDZ4jtbbuQISCY5xJBl26f2O+Wh32G1hDFrEhc
-5jvpyCjUjBtdCo96zRvU2XsZRRWoD9EPHHGS5FPKCdBHlPZOkm/xSE4v/srTmxPg
-6qYntQLRyURcSQT2FwEPbItx3mpvHq1MG1LWhMH7Yb4bhlrEdKWoR5AWLfhN7YDG
-t8dRUZVxwQF9VczugDjihVrkYrmhHRkDAgMBAAGjRTBDMBMGA1UdJQQMMAoGCCsG
+ggEKAoIBAQDYVMefpss9TFc0e+pubi0OLN9fHERskMvlLxUN8KCpKL41Go+jNtmj
+6qdIalYbhH/mE0U1xL0m/9dVhIYj0VGULm4Mwz2u8j/9q6nPqz1M32xAGohVLe4m
+Zq6DQJW7XS8ELMnkU5k6uriBB00WNwU/24RdXMLhsFWtYesSNBuAMh9RaFK6mNEK
+p3VHY9ZwqPkSscXvxnIS+acGS7V8VIDN4xgxEqAcaMI5IlFhFUewqxi6xdeoYp+X
+QLfHQxfrPolp/mAXiWS+xyEKlrsjM+H/USl2BflTnYldR0RWnxARZpEqCCZ3o78l
+BdyOUZ4P1J5RmCx0tUk0+x1cVVT9HGzNAgMBAAGjRTBDMBMGA1UdJQQMMAoGCCsG
AQUFBwMBMCwGA1UdEQQlMCOCCWxvY2FsaG9zdIcEfwAAAYcQAAAAAAAAAAAAAAAA
-AAAAATANBgkqhkiG9w0BAQsFAAOCAQEAjVmcCjkgQFQbiZRlwYStgpWYHDGYonEf
-YxOPouTpM+x0JKlejQ2uszX1GXtLXgymqLsjHkcfuoug8aPa4vmUujmuAZEnISae
-qaN8Z+qxHEkBatjnOp8xucL9tUJ+rvCV/i4aDWg2QV7MJmNXXLmUqZMjtjEvbsaw
-qYADAoiCcuREZaeGM08WU0Z56gelb5+uTRbYFEyXO36XR2tRyljJzqXGwvR0tgb2
-LoqXw7QYlIyszQOJOZx4tu9OY/U4RfPwSZEn/nbrN2gIPJ/mpOa1mY01d8fg+Wfu
-A8a+sZc0YYh+qcy7L8am1xS37bhctc6QsaZbf6PfqxUretNzTpKfcA==
+AAAAATANBgkqhkiG9w0BAQsFAAOCAQEALT7OkVF3XbytVG2wKQRjYQ1eIa/Cc+IH
+noOD7c58j721FgbF1Ctm/2n9s8Q/fxDmz4l8njUyf1I9RyxjrovUFpuRtwYBdwTe
+myECS5V/Quel52y0ZWS8FCL+MTHfhyPBVPi98zOK73iVM9RR1Ju/pZhlSWtBp57c
+X5jWMBtIjPPJurpBy2tB7zDb8Mw53YO+wwOHVlAq4SSyU++zcBL+F7cQg0UeCeL9
+KmB3QRaDqZf+5h6zZm0a+b5AB4WOh91LCGeTltJsKE9Fsf8KU2o3GqvWgVS0/Qbs
+dprPz4SFmhQnK8jUTpHAAWDTGSnInQRZlwSqHdwCJ1uuo5RH6kMsrQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDCauZxI+7aKcah
-n2sGQ0flmSrS9jHnB/r0EqQHdABWAzSeQ4yVuqmL2OqknjTBuEEjDhuPPt8EuOTx
-O5iWwoCTsd7dK1VS+rbq2CdrzwMhxKRzBDyPqANzpOrmb4UMW80sYkdFKpBlwxi+
-DxkDZ4jtbbuQISCY5xJBl26f2O+Wh32G1hDFrEhc5jvpyCjUjBtdCo96zRvU2XsZ
-RRWoD9EPHHGS5FPKCdBHlPZOkm/xSE4v/srTmxPg6qYntQLRyURcSQT2FwEPbItx
-3mpvHq1MG1LWhMH7Yb4bhlrEdKWoR5AWLfhN7YDGt8dRUZVxwQF9VczugDjihVrk
-YrmhHRkDAgMBAAECggEBALWpV5v+XG/Du1NH96QJeAPEg+xNmUFsBoTLajOAo9O6
-+Q6sxY8etM/0pwOcxGUCuvJ7eyr1L0RlU59I9YJhSe03tavpkrePhx2XTfaBn0aN
-TLAgFEqTTm5fehJJwANVQDipuDib1gMQMm1dBB1XP+3CrBC3s5LLzxY03mxEM1Lt
-szKHRs+uEl4FrriEvYoXllkRB/Ws9zatFvVQ2idJk7Ritmai9lcxq9EpqK0SxeRS
-1FM1T08IVeroHqsJGY/oyNLzkHJZTJCBActQvCauB2yP++a8+1JmR7GcIBUjbmfS
-RdXPArC8hrXDJoOtissI/28rRDiJFaK+dXrlrqxcc5kCgYEA8KZq8T1NKwUnGiWb
-Hhe7LUFOOljCGHOa4uTu/3swXDJU7opIj0cZ7s/QR+Yc+DHhoHTp7N+NbYsR1WCf
-RqiWsQieYdhZd899vte0SLNSWExvRuFpgOcJ5mrUzcTRlZiI6mmrXCiednhvryOc
-vd5uQwBlwI7itn3HvB9+kUVpSRUCgYEAztGM+auXsnwyLPLAvxNlwv4JNQ/DGM1Y
-o/VP6g3N/7Rj1muRN1gqn/GW8pBCYvqH7Fu/SUgR5KT1SmILL2Zk5rY2G9x/J8dM
-a3i7J6J6/fRZgE9IuIoL0wZ3F50iZdosWCi5tOH3++2LwRzHh/KOLzJ+MyCbFpAd
-8RGW/fJzL7cCgYEA6gD49pGkaxO8nRk6R73NvcjF98h9HGe1kbIJkJZKRSyQF9CR
-k/kQh36+SlvBibp/apalLTeIf59+fN1So9OljIoT8JL/FJjH7n4ziYoNpVzVy7e+
-7qA6qTryqRAcuHm1kTOldJzu0cZ023omnR4gW7iUOPT4EXoncY+ydDJpma0CgYBl
-sBSvoZXptTT6crUgfcoYEM24IDLd3AFMRE5xly4FHs3D3Im++4OhtqhwRUvO3L2+
-EfJAdsYNdKoCU6iKOr87zLVYB5chmNVTNj5XI4VznhPviYwI6B8eN9yQaLtD9vy8
-r/F3JW/Hl6mSXrMgfbs5K4tvWgXHFz8Ri4OBAxdtiwKBgQDEY8zxo0ekWuvPdNVM
-3l/tDDGjPdLBSf0wSEBLD1ac+qx1rklvgsxJs0I3MRxAQ9VeSXHA5PjNzj3vVAdo
-yv2PpF3juB+w0EvX7FYWcVfiCcaMHU1e2EzO23IecRTZFWsUoyl2pB2dgmG+gO8E
-OgPIeV6qCNVSCl9H/Xxjy+nBvg==
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYVMefpss9TFc0
+e+pubi0OLN9fHERskMvlLxUN8KCpKL41Go+jNtmj6qdIalYbhH/mE0U1xL0m/9dV
+hIYj0VGULm4Mwz2u8j/9q6nPqz1M32xAGohVLe4mZq6DQJW7XS8ELMnkU5k6uriB
+B00WNwU/24RdXMLhsFWtYesSNBuAMh9RaFK6mNEKp3VHY9ZwqPkSscXvxnIS+acG
+S7V8VIDN4xgxEqAcaMI5IlFhFUewqxi6xdeoYp+XQLfHQxfrPolp/mAXiWS+xyEK
+lrsjM+H/USl2BflTnYldR0RWnxARZpEqCCZ3o78lBdyOUZ4P1J5RmCx0tUk0+x1c
+VVT9HGzNAgMBAAECggEAZHP5vkjVV0F7vQNNeLnwcgSkNKYLn94cz+9s92wlLmec
+60vKJx/xMAxmYVn/YgfE3DFWkgEpVFK4yx0erMMl/okQ0RxYKESVMlXkJGp/rJKS
+0bGAXQ1W6L4Fiy0SWk6K5oVluoCXRo4t3rxlL0udRGnKrQ/F7sbTAg/V+D36Byxk
+jbN9kkmh8n21/rocMzOzc4WRwMVRKWWfplfXLrR/stVIjsaAr5V+qqR6OQ3/6iye
+AfFgf1KrVD1LdxcfKDbTI+LN51V28ojuiPWg3utGDs09rU0lBGmjrk2797otmFqi
+vVC27jGq3MmpVOFOOQoYfX7LI7K1M0XLVuD/+4ioAQKBgQDttpBQI6uUY8QfxKn6
+l/UocomPgyp0J8ljtHROCbRnoTtTJmmIrgytwOgF18VJiIXwT6SFkkYkEi8NvTs7
+SCC45dn06Z3Zp0drog/pYaFMJZdPWF3ULNivtrAB+4l0t3cEc57GC8ujX8IHiUwO
+4ABQ3y9sp/PH+TEJBy9ym+byPQKBgQDo+SCYv8h08f64VV3WnrFWFVBHD4trn+SL
+XK49ITtMzBvUWfHpe6HzFsEeVUohN6cbpOBs3WuI69PAv7ZhFZtL2Y9oIgwhO5JG
+FQrou9FDzBqn0rsagVzf6O8ysmKM7abo1+WSf/YMCTKNbWDJ6vswCpM1akAVgdIq
+tBGAsVrd0QKBgGewidSbKp3IwuUEmQyZOjQRehWipmMSc9NNBKqBqzLE7qa/i6s2
+GIu2KL9btk/0AFCpvN8Sxgu37tR0ZaDfPn7IrEEbomItiIbXo08u6ffYMd9HcW46
+va4v5yiGOxgxavYDJQ/IV2SFQZKd/hJC/YBJSluAC2OAPDZu7vB0NtIVAoGBAKQG
+CkIg/IQBgMHNMIUTM22VEfEF67X5W9IWjYHybKq0JX/LV7t31NyuD4dIqylZ4mhf
+G7bp6XT7/Bj/oOsXaD/ty76YAgw/wctfs0+KNFpUUAwKHLsbiwUGHaqG10W7aEEV
+B8euGc/9hb723CctLm5zc5Mu3DGINizknFIMDBqRAoGBAMPS/WxSB/GSKlaGq5mz
+kC4f3/So9cz0rVv+9dN4Eg/aMiK3IPqfaVtjIGJ1LModYP8+0EnmwxtLutNgFDRJ
+yjAdtScpl2JMgxtMzS9poFLTCVo5LCPFHuzhfrm6tVQjXcYqjTVUaKessAZQPPle
+C8o0xbmvmzekDPmaEGWiQfq5
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_SAN2.pem.digest.sha1 b/jstests/libs/server_SAN2.pem.digest.sha1
index 8aa9bde808ca0..2101d581f9efb 100644
--- a/jstests/libs/server_SAN2.pem.digest.sha1
+++ b/jstests/libs/server_SAN2.pem.digest.sha1
@@ -1 +1 @@
-CA28CC498B15E8D0AC2DD04C60AA8AA72B6D3FA0
\ No newline at end of file
+3040F262AC6C02210A6C37B009647238DBC8BD3C
\ No newline at end of file
diff --git a/jstests/libs/server_SAN2.pem.digest.sha256 b/jstests/libs/server_SAN2.pem.digest.sha256
index f044b25a0f027..5705a80aad7fb 100644
--- a/jstests/libs/server_SAN2.pem.digest.sha256
+++ b/jstests/libs/server_SAN2.pem.digest.sha256
@@ -1 +1 @@
-EECBBD89E1F99E4DB0E6B9811A85A3F3C9C360A390D5811994E8D4C1700CDD2D
\ No newline at end of file
+3740B9EB44CAA386A942F009AD601AFF2ADF5DE5CF25B2E76FE9C76909E365CC
\ No newline at end of file
diff --git a/jstests/libs/server_no_SAN.pem b/jstests/libs/server_no_SAN.pem
index a6ef685055fb3..26cb17412d378 100644
--- a/jstests/libs/server_no_SAN.pem
+++ b/jstests/libs/server_no_SAN.pem
@@ -3,52 +3,52 @@
#
# General purpose server certificate with missing SAN.
-----BEGIN CERTIFICATE-----
-MIIDmzCCAoOgAwIBAgIERNWoyTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDmzCCAoOgAwIBAgIEB5Dc3DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjCBkTELMAkG
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjCBkTELMAkG
A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD
aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMM
CWxvY2FsaG9zdDEgMB4GA1UEDAwXU2VydmVyIG5vIFNBTiBhdHRyaWJ1dGUwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGBqSXwzpbfK2E4BS7HftKkm3E
-S2D68MDvYjdbMYQVfd+b7AsvtzgvnEhkxjgfoiID5fvy0Ag4RS7SMhpmJtA3e65+
-HLvbwRgnDiugbdNpqpW4cRWxxb/GIOMir2gbeJsUFRGBSFGQR3l172NJIKSERKU9
-m91/zoFbU2tvJPD92qf+tUu+GCL2+7lvxCL5kbG8UlYn07TEnqYEikQzpKC6KaVH
-+fMFahcRIKYo2Co6+pzCNC4hGnaDLQ5R1bqUSow+0+QQ+Fhl/h/CACSdf23p8Cq0
-SWFZ73yXyMvQSx53i7n2EXrgwy6eVoA9das0wuixhAfQ5K2IJhLe+NwDUOhHAgMB
-AAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IBAQC/
-mu3lLVBn1aQEJB9vFvVnpRIYriLbJ9Wz2oWM4iqJcQNti6HxYaYs4GNxzM2TrC5U
-ehSd2lEIl8blx3s9gkfnS87/pRF5N55DlXX5DGmbdN3WIrnyRRwNtHaaA/MDd8Sm
-8nfxtB4BAQBOclBfbm5WKVgQjFxNO3pCkOpoWMuhKPOqPr7IVL8OwYBkYFtMxdBA
-gADickznCdzTIi5O14CYYMGzkbkf4dcP93WzA+tKwlDghmYOFu6Ujoz3xsi8hz16
-L2pOI+4wznbGPtXGaPEZOTV0ugD5OlbGvyCly8YqgGw7/q9/UdNM3jKPJ6X0UoFn
-P5F2t6c+5jdsuao+bSGw
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJsUV0l4vt4bLwfUO3hFXoaI74
+U31dq/qdfY4RiXk4DZeNECARkR5rr9xV2Ut8PL08HG/GwFToK83vQ8mD2LPrJNBc
+QTmc2zPs6Nvsr4Vp4GwTvf4v7imAFV4sqdsCZ60S1zlA+nQ4lXlt5i6pEGRWscxQ
+H6501Ycm6wua5CLGbw05j8erj1Zrr59mY9rGhkJ8e6D0dgOlQ52kQYXHFxh8MdT3
++whhQFY8YYsh7TbCGR7xG5CALeFj0pw/unKjA9q+G+7f81VBOp9VCnZmeYM0zmVB
+hWzPaaxlvn34funhrm+zG4WC+5qBo79N7gBoJk7MxuFPn/ZKImpURSD7x5djAgMB
+AAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IBAQBh
+xjEZg2l5oNZpmEHTWNsWTpNP0/Ifl7Aw1XPz14CykQClOmrzZR+vJXsXG5B3nAs6
+BssYXMHdtonlJs0QMm/YKTrmn13c2IUl++vM0i86qgdI65KOgrXmkppVQVj6Jg0O
+4D2Qivl8bd4HXTCybmFezSxaMAmXPBp6qhdp2lpH+s3vMf4x5kXenASMqX8lEJoc
+nGeFRtd6EF7+PZWv3MQ0a/gVrtmUOk+oB6G4NEt/EBe0dy9yqTgdeuPGLU3lP4kl
+xCJCvrgRAg5TBzgJAcaf97v9wb4BmGbTJPCjX8+M8ynLp1OdhqRd1M09ynyqJpDy
+pSILRZEQq++SAeOUncUV
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDGBqSXwzpbfK2E
-4BS7HftKkm3ES2D68MDvYjdbMYQVfd+b7AsvtzgvnEhkxjgfoiID5fvy0Ag4RS7S
-MhpmJtA3e65+HLvbwRgnDiugbdNpqpW4cRWxxb/GIOMir2gbeJsUFRGBSFGQR3l1
-72NJIKSERKU9m91/zoFbU2tvJPD92qf+tUu+GCL2+7lvxCL5kbG8UlYn07TEnqYE
-ikQzpKC6KaVH+fMFahcRIKYo2Co6+pzCNC4hGnaDLQ5R1bqUSow+0+QQ+Fhl/h/C
-ACSdf23p8Cq0SWFZ73yXyMvQSx53i7n2EXrgwy6eVoA9das0wuixhAfQ5K2IJhLe
-+NwDUOhHAgMBAAECggEBAKB+qc/GjvmvJwNWifbN6ekanJRHc6ZukjByLeNfUmoI
-xr6cpMRbftI3nATxEnOw+FiogXbeXIsepz/9E6BPPNp7B4V2zFrjOSwONBWmbX5q
-n0YJgUe/uhpedTibaYk6SpYQLUvWzSReiXaoOpP+PNhmunYgaXk4aWcXSS+fhSJ4
-NCYYuMinGdDn/uia7h3jzsPCFjMUEFmhg7vxfqTdYQw+wzqq4GkLXkKrYM+Bt6kY
-YU1duZFrRUlO5oOk1lyqIIdRIuxqtNXzwxq/atYdpkEjfmZ3tT+YQQKdwN194eCR
-PlwHGyKjyqJNIP/VkUqf55kzgR8ye0nr4hCOpi4TkoECgYEA5/8BTtxj4gNDXIMN
-MCwCuK7P4DfshzWEyeaelyoOifVd3KUFafDRiuWjhIQO79lcjn0u6qP+pBQ2c053
-N5EuxXlNyCKCZb3j64SPsTu6SPeBf7t3uj1gdXjYJ2uX7SzSdeQgnC5AGf3HbfTb
-MoKnTb4Rp7W3VBS4f8zFA/pFmxECgYEA2oPaeSUGgOE0OodgrtyCZmwRHNN0zQ/G
-wz2R8OfoEknMux2JlRxT4XLK05cWCbB8XVKT/On9bQtyTDCtSyeohgIH/0ubXIz0
-jEXo4FFSuWrCuX+fryKCL3UBZdGcT9sa5c+ihp/bBfI0aGCEBpqyqU7jqhCiOS04
-MTYExZAe3dcCgYAepg+0LWV8rFWv2Rha7tWFNIL96iVzT1y1l6QH8GRvUV0PEzX9
-4vSr1t6dWRzoDt0tbdhO4092ubzfytPxIVr+d6IQ+I4lhsqfiKm7Dlrz3M7c4xVU
-I1uk6UwVGR+/E5bSQEsWlv2c8WvxWNHEgII9wQhwEY+gqgQdzh2RAvVDIQKBgQCQ
-fspUe6heRw1reqPzXXuZUPLL9gRxqXINH0THYgLzPPv16+VTxeSt22PGIU2AYMUM
-TRPxFaXwmrYdLb8aSIJHA0bYid2ViNwZeg0xcHVciP10/rDQdehSi3O+KarMn52c
-w/28Oi7yMqR3NQN4/okQLkub5N3xIzbaS9dB8TiIBwKBgHvKEfySnAqfuroSDRIb
-ryM53B8P1sEd1Nm9QHMH9+vI4h62HSbcDLI2zRP6TMjeuVGJF1LRaHdlE+XLNyrb
-qIOKQ/Jd8HD0MaQ2BckyJmstnU5UagpjkzCMmgVqObBDz2D+1PKMRultta8GNdg6
-+tqSUxiEDXjWu/BHMHtVswGU
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJsUV0l4vt4bLw
+fUO3hFXoaI74U31dq/qdfY4RiXk4DZeNECARkR5rr9xV2Ut8PL08HG/GwFToK83v
+Q8mD2LPrJNBcQTmc2zPs6Nvsr4Vp4GwTvf4v7imAFV4sqdsCZ60S1zlA+nQ4lXlt
+5i6pEGRWscxQH6501Ycm6wua5CLGbw05j8erj1Zrr59mY9rGhkJ8e6D0dgOlQ52k
+QYXHFxh8MdT3+whhQFY8YYsh7TbCGR7xG5CALeFj0pw/unKjA9q+G+7f81VBOp9V
+CnZmeYM0zmVBhWzPaaxlvn34funhrm+zG4WC+5qBo79N7gBoJk7MxuFPn/ZKImpU
+RSD7x5djAgMBAAECggEBALri7c6pFdmoRpwcFgEYOFoPeFqVUhbX4nLIAjoxvga4
+YXMuO+jLJPr3ixxpKk7GITpvxwrM8F/pJvrLPxBSXfRGumKhdXbojma5Jf9cbCy8
+7KgmZj+XWRD7u7V69hp2YqKQltaku8gqiMfSf+3b6H2EZiLgGFDeAkuHg8tUFPz0
+BhPp2szdh3YVOpmYBHY11EUFyAvs7MCvfZ5/SvYv8S7BfCouH6IGnl1Tf2C+r3I/
+4RK9tqViK2uVfx6+phw/kMBk5vwHecB7uPrcEcsoQ5gOJ5UlPGcG9J8A+W3Q8kRP
+85eJ1+klY/zyXGMK5vIoyPtgYxV4C8pjIUhTn6+6h1ECgYEA/Y6zJrFQZbX8m7WR
+iHhXyKe0gqLWMhxHp6499SuwHIu8KOlVR+O8/5JqMTXhHcAWqgqxZ7vnsurYuqdS
+GVlrs42+wz+BqrKdhNCSjkbHu/+QnT4Y4QtlwjJ90gldMpJGsz72/qT9YEhqljrb
+5N0AEpfazAzig86nDmoZ6SEj3HUCgYEAy6KquIo+LhmMZWf7GFj7F+xThUZdSL0Y
+3temH3YDlwwYPaaLeOhlUB8YONQAOcBKU/kg/rCXjqQj+/esi8uzByREN2JuBDLx
+NU0hFjRGaR8ghrLtQqPI+z+dWMf5n/M7sP0/JZUdoRnbeXg3PTjgsoOl5hrfPA5v
+kKV98BBsCXcCgYAPlIQnnX+eMFeMTdTaeOKT/tIVsOHOhHRWtlsyRHP23RM7xFKs
+Ly8+2QVTbscdZ988pZmPETnEga+9kGh4DEmU5+HP54gVMBKDxbkrque9ApAlSVqI
+AYZIL4gRrueeIuTbQMQ2k3bKQsjh4E4Aux+1BMW6AhStGrajKRvcel/UvQKBgCuk
+u249wsHim+KH/JrlRzuDXQstX4Tdcl0gXuxTQMW4w+FCtpW9SSl44JodZcb63XNN
+67JqarEZoNS56nMzvzUCotQz9lxUBrpjw3mczpJtJ213H76ul76xjamfgnKzkNGI
+FWaapPUIM6+/AifO7umTqZS9oaafeW3I6krvWhhVAoGBAKlFTaJ1r+rF/JWdnKru
+ZNYNU88ddKy5sSc69F0uH+/jZv/hBaqPo9aeAfiqQwVmm4qcjI3pWuUburkNFpmL
+C5VQ2LVxgUHgic17BR8GQAlcmL7bJawOEWiijU6Jz74Z7HnBXKPoRsOnBtAuX721
+tTfECd3/iuqmfVKUptRqXtb9
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_no_SAN.pem.digest.sha1 b/jstests/libs/server_no_SAN.pem.digest.sha1
index 1fb3e0b684d12..fdcaa7b41ff1b 100644
--- a/jstests/libs/server_no_SAN.pem.digest.sha1
+++ b/jstests/libs/server_no_SAN.pem.digest.sha1
@@ -1 +1 @@
-57BD30716FC867960EC0F13CD7449A7791D11798
\ No newline at end of file
+F6F1B14582AE459BCD82A3EEA8B9DA6FEB53FE62
\ No newline at end of file
diff --git a/jstests/libs/server_no_SAN.pem.digest.sha256 b/jstests/libs/server_no_SAN.pem.digest.sha256
index ba98a5f8bd965..27a7bf17d973c 100644
--- a/jstests/libs/server_no_SAN.pem.digest.sha256
+++ b/jstests/libs/server_no_SAN.pem.digest.sha256
@@ -1 +1 @@
-D5FB78ABB12B2B2413A17981FB384AE6E8E059E53EA8DCA471935799D410E041
\ No newline at end of file
+59924A914651C13CED05B61D757DAD08BD2AA0FD0D23FED683DF39748CC59688
\ No newline at end of file
diff --git a/jstests/libs/server_no_subject.pem b/jstests/libs/server_no_subject.pem
index 7766b47095db3..959e3ae42e8d1 100644
--- a/jstests/libs/server_no_subject.pem
+++ b/jstests/libs/server_no_subject.pem
@@ -3,54 +3,54 @@
#
# Server certificate with empty Subject, but critical SAN.
-----BEGIN CERTIFICATE-----
-MIIEDTCCAvWgAwIBAgIEb1+q+jANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIEDTCCAvWgAwIBAgIEPPun/DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjAAMIIBIjAN
-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6ETKNscCEbNn3MC81M9hMzfKlEU3
-KTZRZ3EOfBXZxxqmEgMU6nBF6Ez5rrLk0EoA7cfnQYMFrP2sdjJAjq408dIVT7yX
-jpwme7ePUTY4OecMO3pkuK5JwRZT0393nDUBgp46ke+CBTaVdoNLNVlPQqvBWC8+
-4IbNvJRuaLErZBh/dXNpgcBDokp6UvM020TD9oOwpoc6byW85WVcbZ4w0FxllsSM
-xaIGe7YlsFCn94NO1r6gDUdB4JQemFIjdb83/oK7dudLRnMi/AtLbNet416P95+V
-m97WKwvrfaUk33ZfdtmXMYQdaEEJJ49isxNE3inp+y3C/rzFSUoGYVo93QIDAQAB
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjAAMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwNGP+tbyhjO31TKEofcmnOH2sNNU
+0Jok5kjftKaq4yDshDhrtymdQgkb07n/Lm4XvETO7PwB5Ajtq6VCqMDwgDvCWOJM
+61G0UVBYXgVXz6hHE70x/JYWgChHkmBjnZ51cphSsMzjiAtm2V/bn+uaxDpXJnuT
+9XgGsbxao/748/JRIlKZxYRYjn+2fsqjRU/doYHzA2RFsNsaEWSL6tEH8Y7oUSkP
+MZ/dSh2xxxyb4IwMs7yo4AN1Pkezt4dq1h8CYlc4Iob5AKKpGoFT0gMSkpkDHF02
+iKergDe8WjKMeNKU0VX/7YIMYOjB063bnpKO6FxCuq4ayCx1yfajY7OfTwIDAQAB
o4IBGTCCARUwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
-BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSgGgjwDilUeiEuYEvNTvgCZt4qKDCB
+BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBStKpasK6aPn4IXRTlOfgJ7c4ge+TCB
iwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlv
cmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzAN
-BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBHvUrJMwLwYD
+BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBEreWhowLwYD
VR0RAQH/BCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0G
-CSqGSIb3DQEBCwUAA4IBAQAIZuBwcX/yY7ygxhsCI5o/cwKD+icct3b9djyay7Nv
-ZEPPVAhAPXTh3i7ZkpU/RqDWQ6avEHXuQuZEXusB+2retKGd7qnBb5mkJzJvVXjB
-MInX1Waj1MQiESC7RmNlJvR7JcPZxDjKxchi4/AJkUK0vK4H3IE34zkL4/EE3zMO
-iII8CI67xMxL34sNEaG/DC061Ti1EJonkM5khfw0jaXXjbFkKCjmhdTv0WXvZmYK
-AN0MOiyMNnE7j9sLcE3R9kvgopJAg08gf3JSkyDTMNQJGxqblmFoo+bKEXfRCqC9
-QGe6HIDiRwxjByZ9iqYxqfiH1G7TVbysruTeiP/Jdmab
+CSqGSIb3DQEBCwUAA4IBAQCEAFXRg0IP+BYkDthBhkMNNRYxGfkiG9PsroHp5b+I
+dI1J0UeeKdXHQKL45sjTog0VFwsGUc9EXauweY/airVE9dNct1P95rysvkwCSD4b
+X1eQe1QzMxU+yUotPLK4L16sWjzbjVYGzk+1TN8ICHUtIEZBSgXNt87mmry9G6OD
+4F/YyJMuQcmtxQER6rNZZhyXVC7rEIRIqo4GJ9btlxv6Y/nJd5r1QGQrdHtn/kX6
+d4veHRNk09zDySYvaOK4eiYgBmyB0ouV0DMdmKid5hKIro5qq/aD8IRmtv+AKVCX
+vS+6RpYFOFSDk/xL+pPBDBuwxYrCNJ3dJfZwB3/r7v/t
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDoRMo2xwIRs2fc
-wLzUz2EzN8qURTcpNlFncQ58FdnHGqYSAxTqcEXoTPmusuTQSgDtx+dBgwWs/ax2
-MkCOrjTx0hVPvJeOnCZ7t49RNjg55ww7emS4rknBFlPTf3ecNQGCnjqR74IFNpV2
-g0s1WU9Cq8FYLz7ghs28lG5osStkGH91c2mBwEOiSnpS8zTbRMP2g7CmhzpvJbzl
-ZVxtnjDQXGWWxIzFogZ7tiWwUKf3g07WvqANR0HglB6YUiN1vzf+grt250tGcyL8
-C0ts163jXo/3n5Wb3tYrC+t9pSTfdl922ZcxhB1oQQknj2KzE0TeKen7LcL+vMVJ
-SgZhWj3dAgMBAAECggEBANg4mYpOzpVNzwVJA0CU4WZ9rH9Ew+oAn91M+O/4o2hf
-XXPtHH52EA4GAnVoIevoyF6Stqxc3V7CM9dARqrMb4siSCaXaPsgUA7hvXAbqlH0
-zYVdFgB6dzxhnZ2Izv9CtOo0hE+wh/h0bMULymqafleRboa6TK1VsJa6EuIw9DTT
-d0FAOn8/bnKVT0x94RorbUdLhfMYRZ2CLcABsQa90Z6++uWQOsIWYGWapSW5EdQ0
-WZ43EkRHyIUkFEY0Z+dGRyVSNDiZMYWgv140mfdy8ftK4IeiAXj0rDcJtt8Ih0kZ
-VDIY6hqMORQONbcH1YMiISt6xhDGeZVM19HcYT/nXh0CgYEA+nr616UuraQFbwz5
-aRRHRjcQi9KjLlfGQQDliR4kyHDcnTluwNoVye1uuPLSjd1CGwyIKRShUG9ariOU
-3uqej7G9u20S+PZZ8196SHNaBK0xtT0cQDufUZo707m/tW+8P86EkZQGzyAYxhOK
-+AbeOk1S1+iXwuXvotuRGYK8F1sCgYEA7WMS1hnOkbNYWc3h8EzLFOA2ujO/9TCd
-CAMiGeU87G9B12A/KEEGqgg7/c5qIE60iYBVUiLseeu7M7ZIpjNgvzZYFonp5bsp
-Ki2GbrnfxtF9hnTDXql/tYbqIDaJSIpF8NTaMEd7CHdIUSe1Dsc2/b5/hmhAVMzj
-SQNUUJsGPScCgYEA5hgQ1AYGkjYpU7E8cB3Tt5mf1oIBqvGwykfzk3kgWwzqbHe0
-2O29tEgrPTS63N0S+9wQPISaB1SznWJMQFaQn/msDD+PfSp4yQu9Pk7Qs5kSH6Zq
-jEr4+LJRIRbyF87zxD2HJGAxvRWEDHkpYNyWSkJ7xqEAwGYPM7C1YxToih8CgYAN
-K5X78uqXAtBAC0AhyODrg5UFt6FKxSuxKhtWPHWo2HNas6hNX24zOMm7Rxx0Nmml
-x0z0haOBECcOs2pMbkVL1oQEnBox+LRL9sKimgVkTnMcZpYARn8jXxzWNLBhKeq/
-dc+1JgIZciS8++r7qunOIieJGXeYPNXPUzP2VON/uQKBgQDvMfdU3B99x2p6bChD
-mmKu0OF8ER8k4bhFQ6lpp1zJy77RYSfR4mdTe/T4iN1vgrSSHhdO6DCJBNFNJOzh
-YdrXLEFjQqznR/GWLj8w8lR3al5tqWrLzSvSvt/DA7TYtyR5YfLVjyu8V94jXTsj
-8MCNB+rBU7bCljz7RZ42P3gbXw==
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDA0Y/61vKGM7fV
+MoSh9yac4faw01TQmiTmSN+0pqrjIOyEOGu3KZ1CCRvTuf8ubhe8RM7s/AHkCO2r
+pUKowPCAO8JY4kzrUbRRUFheBVfPqEcTvTH8lhaAKEeSYGOdnnVymFKwzOOIC2bZ
+X9uf65rEOlcme5P1eAaxvFqj/vjz8lEiUpnFhFiOf7Z+yqNFT92hgfMDZEWw2xoR
+ZIvq0QfxjuhRKQ8xn91KHbHHHJvgjAyzvKjgA3U+R7O3h2rWHwJiVzgihvkAoqka
+gVPSAxKSmQMcXTaIp6uAN7xaMox40pTRVf/tggxg6MHTrdueko7oXEK6rhrILHXJ
+9qNjs59PAgMBAAECggEBAJaUEYimzAcZ5HpOVV3XEQR7QpecsiOl/SMJzgnaDqIJ
+WCt5lEUZ7oFfc6fJHQs5VrfkVGSl+SQIBPOCv6fh5O9/u6qGL+OljtfhkuD58zVs
+CPXVTnAfFbtHuX0KYUD6OmYfppQDrbzUiqE/RtiAugB8PwsCfu23qiKrIW3o9Kos
+OlGc0n8WGOOhfA8uDr+jqaIS8rgODBRDd7siTm+YIPG4HZuW77rWBPHMmgxW5geV
+RBCbNnlBokkZX4ZfBzSiPHkDVZutHsNQmlWpl3Equ9O0U7aAOi3vCEBFSPhu5Z9X
+1YRi6gVIr+iUtKPmE9rXgNYuY+oz7YmZys4/4RHQ/GECgYEA7z62La3mv5+GE3MT
+4Hx3/mR8uu+v+sTE6FjU70d4ZJSA6ack6taUWsm75aTGLo+fTozcig1Zs+lPDZiv
+h0sxAk+Tu9JkR3V4mPO8JPQ/weRz+QvrWf4mhEzAmhGfwd8l9EZmDR5UlhtoR9T1
+VAdMQov3h3iD7ThXr3Rqh0LTzCUCgYEAzlJ/poP5l+ypkTIJzb2G2qC8htUtDyW2
+IwCPFXSs2Sze3BeThXwW0LaD85W3yw9xVhoS9wFH4Rc1eDQty2UbkpMDwfH3EHVh
+BCzpu1jrkN+F8KkBMa75Ak9jeuO29IukPVKnNUEWqskhVVFRym6zs9938tEzkBzZ
+fpse/Zum6WMCgYEAiugLWFxGxG13qCVBni5GcTKg5NyzfVLvzMN+5mrFsQg0DPVZ
+zuvFeSz15nCUDIahBTdt+M2ljnrgxlEnYNM8Yk4XxY5zYLYIzi19yKrztbzRxQCx
+pi+U5220yf9/lU3duIWDTQyWKg1Br6sqwZ33HhAsKMDFQF9dMebSzzPyyu0CgYBf
+LDEUB7bfvNyvTGy3SiDlwtWUQMurPBMbbEuUcyC0gX0/+2QiZA5GMpsFht+kPLhk
+JlzvMdkNXN5eV3t0YmxonYZTNaFpJywcd/dNY4QubN1lGSCi4Xqd9S8HZflkLvIR
+E4psB11EZMkKiRt4jL46T/ANwzDM3nH5c+bEx8MjzwKBgD4YsDVmi9KZuq2kq8Bh
+Vyxkwyl552a/0CRFFYooN/rUpidVBxegjqFaD8ynGQXFv1MR0Azz7slzI3AI10Wi
+/YmD7V9Ko8iuO8Rnu5EMXMtqwswc3cSeR3Dxn7B/CQlqvD4DPYWlsL3+gCch1+8f
+DZu2ZEwTqJFaQhUcrY23JM0y
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_no_subject.pem.digest.sha1 b/jstests/libs/server_no_subject.pem.digest.sha1
index 928599a89c9a3..d592305cf22c3 100644
--- a/jstests/libs/server_no_subject.pem.digest.sha1
+++ b/jstests/libs/server_no_subject.pem.digest.sha1
@@ -1 +1 @@
-B3431C4D34FFE25C36ED62A6958EB9364E5C2DC2
\ No newline at end of file
+8F5D79EF1032B2D9A3636656CD0127A67F1D395B
\ No newline at end of file
diff --git a/jstests/libs/server_no_subject.pem.digest.sha256 b/jstests/libs/server_no_subject.pem.digest.sha256
index 0f686d50330c2..a9f8be39a8d79 100644
--- a/jstests/libs/server_no_subject.pem.digest.sha256
+++ b/jstests/libs/server_no_subject.pem.digest.sha256
@@ -1 +1 @@
-E1DEA621F4E8F1E4AF9471F0A5FE063F8341287D73304D6314055F0B61D39D02
\ No newline at end of file
+B34A00B1A79DFA5ADF3F03F762ADBB7430F28A08CF4EC0B00E09C644CC164AE2
\ No newline at end of file
diff --git a/jstests/libs/server_no_subject_no_SAN.pem b/jstests/libs/server_no_subject_no_SAN.pem
index 0c0fa9291ccb3..886024b297ea6 100644
--- a/jstests/libs/server_no_subject_no_SAN.pem
+++ b/jstests/libs/server_no_subject_no_SAN.pem
@@ -3,53 +3,53 @@
#
# Server certificate with empty Subject, and no SANs.
-----BEGIN CERTIFICATE-----
-MIID2jCCAsKgAwIBAgIEf7gcJDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID2jCCAsKgAwIBAgIEQYe5jDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjAAMIIBIjAN
-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9BAloe22zkr+eJj4AtFHHSY4gib
-e1CqSp6iabV1kFlxoMCu4usfw5i8z5b5HA9vUguQsrUvcs2p8H4tzkVGwAMPvFXs
-7wB3c85GC2ds6rxMJLtLFqNv6PcmvFCVyip56AfyDgAT8WuPSNItIVYHEvKJ5Fkv
-VmPY/q65dtbJkI0wTJilsJ37vgL6PeqgZDhTlcwYQR0eQ0L6SXDp77q7/2wAIsyL
-yZkA4LAF3nqOogF3aA6ESPlpfAz3PztBcv8z3NINgiMzWhve877Daa6AuTU2Fq+0
-4V7+Gn5PODcQw3ze8qGSD429wa2DzsvqwIXEYxHg+M6QIMV54XicsZnSEQIDAQAB
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjAAMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2DdmP/51O/Dn4LwkBFyAE2qAebqA
+iu4x7cGVMe/WKC1TdYUmp36z7IxEpNI1fCXcWZJoytU/HviJfViFh7KyhKcIGx93
+X4Pd/AC0IGXJ+lR8EGLQHU+UvAwqHOp1ef8OCFS6D25FEzVANgqUd9M2F9FGg0pB
+pStkp9M6YkQzgEC/SnRD5R521l7Ui+5I5g+CdMezX5+69Y7Bil5GriKKiM5yzdyV
+J5NK/SynoTbupzm5BP4cpWK9NwebCV5femrzaKOt8nK2rNd0hxuM6gfCJV/t4fFR
+lUCpmJWJt5v6mkkr5R8cgLykLS8TLNDNhEDxW9KPSUzeD9lEB0v1ABvX2QIDAQAB
o4HnMIHkMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUF
-BwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUVeu/OMtu+8A5fnY8RipJ62KvK84wgYsG
+BwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUFvZLZ4jB9Kz6YNhf6Cxqh4VGwvAwgYsG
A1UdIwSBgzCBgKF4pHYwdDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr
MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYD
-VQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggR71KyTMA0GCSqG
-SIb3DQEBCwUAA4IBAQC2cmJknkplrrHBKFfodtZk5988RCv+3kePMd70zKEtEy4F
-IUo2qalZ+DkbX1WTVxnqkj5nUxintVCYGDdDubMdcRobrTXz9HP6k6tkt/SY0PXe
-7v7/zxuDndI+HpsYcaw6caznD30N9L46BYR0iJ7gsHbYzSNi/3hzf0Wmj5zpbcQl
-YPUso/mZFvsSrqo+fOjgTuTpn482oSwictno4rwcGmT2wUFxYQpJsn17n3uN43wx
-uGpSuL9LxwYdn0FG0255zhJkp8e8ofHwS5jA6h9pDR1mdfrPiwSGNlFiUBTmbMXj
-n2uIDbkoJn42cQUGKxqyrEe6qMHYqET+e5Y5boMm
+VQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggRK3loaMA0GCSqG
+SIb3DQEBCwUAA4IBAQAvs0z/RFgx6BgfH4IsskwxCooqJ4zSZSYwUlz1lZA8jT8W
+NS6KuXwdVw9UvrrzdOFVZbsyzlVjAvRuTMV2z5KpPeLnfraxWuDov7gsYqa6UCfX
+jDX38E+lolZ17ZD5m8BAaNIw1/tvCEokw6+OTK1FEWpEhzucLkF2P4gD3FwAuz26
+ibuG38CSOYn9sn06YLKfNNQVvPb/Aju2/PGT9A2FuLb16qbaDscQbIDbakMt3Kp1
+hBCVQw4EguqSVlxFO+5Qgv7HYd1ZF1rrrua4NqyZ/p21NFFcwmNaZCVRe1zeZ6zf
+Rldsmd2l8y4mAL/omMK8Nl0/k46izX/oA4G/NWjh
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC70ECWh7bbOSv5
-4mPgC0UcdJjiCJt7UKpKnqJptXWQWXGgwK7i6x/DmLzPlvkcD29SC5CytS9yzanw
-fi3ORUbAAw+8VezvAHdzzkYLZ2zqvEwku0sWo2/o9ya8UJXKKnnoB/IOABPxa49I
-0i0hVgcS8onkWS9WY9j+rrl21smQjTBMmKWwnfu+Avo96qBkOFOVzBhBHR5DQvpJ
-cOnvurv/bAAizIvJmQDgsAXeeo6iAXdoDoRI+Wl8DPc/O0Fy/zPc0g2CIzNaG97z
-vsNproC5NTYWr7ThXv4afk84NxDDfN7yoZIPjb3BrYPOy+rAhcRjEeD4zpAgxXnh
-eJyxmdIRAgMBAAECggEACusmdhnZtZDzT4mryMCe+fKFWM6rS1X7MG9cfczA0u2A
-P5o0EJketZ5Ri7f5L3puPFTTyq/h3Ei8knCjdRRt8oe7CHXuWk6qox7gz5TprhPY
-UWJaESzbOq3zoIsGykQB5k1f8xRqFGTuZYtieEeqDZ3wCkhtChav7M53lcMS4MZa
-/P5lv7lNWEnJh59LTXlJ6Pp4zh39OGV9F0MdJzZWhZWcfuEdTn/fYRYiPn/Pggm5
-tliBdNYPnn1Yu6gIQru9EtzTK7f3yeq0oaKBXIk7PiKWZGDQKu5bLGSbFkwvDEPe
-FzoT0q5YZ8cXwRlBp5QtBV48jfl7iG1+zTA5fxVjSQKBgQDlcPVxqv1jOeD/qarU
-ut24Jfv3sNd2oouRiW8+j/4mkBklCYEbCuIJsuAvC8yQaLZQZFeWrK/H4I2eoii6
-qQ3/dCH/D7LerNgZM1ResjQk5eNghmmhM1QnJa3HTz06h33FZL+m+QLXQ9xwmaHV
-6om9wDIeWonmmP17dVqxpEPUFwKBgQDRjbyNPokBxAXnq7X9mbnKuMHlqgUyBD8r
-sInn9XlGjqBBVNpG+k02z+kl7qCwSq41qFHAO7LSnsrJj/xb3oC9K68/ddsy0esK
-76RhSYM+K1ohbNmXy7Sxab4nw+9mIOyH8ZViPCiX3njRFAnHYAHmAwIIe7qvC8+S
-MqqSRXLcFwKBgQC/gUoMJwk5wfyPyGEDEDmx01p9AgjFR6SwxAKrOGqMa2fhZw5w
-sOO77qZ5/iIvQocxkJ8e72IEtePezUCrlCFP3/HkP4kvnRr95b79D0bAqXTggzA1
-UW9jAWsL2ZKkgKS0loP07RC29jhPb5wJvapcS8hJV46Uf4vLeUdK0G2g3wKBgEsS
-40Ua7UyjpJXuD4Iua8Bz1otoEPptSRBm69EdQXiEqmcddiHNlJIZhGahihH+f1Eb
-30XmXVdVqkLyAbAME8Ux/FPY7lHToMhHOHXeN1WzhFPLQl0+jpqszoJCkObezr6Z
-TzJlaQmXm2MUMbiq7aMw2q6dXTFPIus2maNTnCW7AoGAFBUq5JBvMntjcs3qNv1R
-Cr1JcC0ZtoYoMJGLL8VgM09P01XnixkjELQzYhrEMPrZRkFUQTtakv1J3Z6E6ytC
-0oWkr0OhCMdf7MVFh5rXc3MOFK+t17ZgQ8n4/YbmncUmnu0Qy1C0XnLFcdJtyE6t
-snYKL3t0oFL9WZd7NiqA3Hk=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDYN2Y//nU78Ofg
+vCQEXIATaoB5uoCK7jHtwZUx79YoLVN1hSanfrPsjESk0jV8JdxZkmjK1T8e+Il9
+WIWHsrKEpwgbH3dfg938ALQgZcn6VHwQYtAdT5S8DCoc6nV5/w4IVLoPbkUTNUA2
+CpR30zYX0UaDSkGlK2Sn0zpiRDOAQL9KdEPlHnbWXtSL7kjmD4J0x7Nfn7r1jsGK
+XkauIoqIznLN3JUnk0r9LKehNu6nObkE/hylYr03B5sJXl96avNoo63ycras13SH
+G4zqB8IlX+3h8VGVQKmYlYm3m/qaSSvlHxyAvKQtLxMs0M2EQPFb0o9JTN4P2UQH
+S/UAG9fZAgMBAAECggEAJaSvhqC0pHTyyeu3kZLRMZAvQgI4cve5dZ1obkNFU8Vg
+bGt8KVkj1iEtqkx2x2CIFogo+ndZ//hhZ5NL3C4+al3ySVaXH+gDc+ZjdeiC27Rt
+zSSG+zNpv7uWcWZYgZ7oQIcPxbP7DrcMsNvysVHybv8eeakp+KmvYpCh743UMZXC
+Rjm7Xhv/ihDcNpO0nn1zmdmUnIh2/dBKlKvpceWEWR4hLjBqrxq/Us4Vs359m/zd
+4gwgTv2TvjwnFoHs7BBQVftRFzzNl7ebqPTt0e5B0LA06p2/ch6uLjh255srN0sw
+5KaMG5l5zvrBU+1EFN3+PotFlrKjRmE8q8ah6Cq0CQKBgQDw6rgnP2LowJt1+5MB
+IQRDilvAQhs0e+usa3Ktff56G0LMt488+CNsfsVVJfo+9iJvEaV7IkRJmhhg1ppQ
+QrdBo4stARQ1bvCZSAR6JG55RZo0l8w6lZedrsbs4IEq+I9Yik4wRibKnFBUZtTG
+ft/IWfR9tokoo/oWHEG8Ncff4wKBgQDlwMt3srcTWywvry+jFOqIB4SHmOTgzE8d
+zfVgyX6ztMsArariigxU1Wzrges3meGItQ/5QjC+SSi5iGfQQtj5j1iDZf8KoYmf
+3e1S9zn69Blrr5Zv/BFTD8FOFjHEfi1p/yc9wnhKuaWddl3ZpskmknV3hnbXi/1D
++Q5P+MH+EwKBgEplUUTdcB/MCrXeYEEC/xwUR80RHPGSKu7tp1YoCEOQ04oATT1Y
+qye/5hQrTCHRRETkQCPMKyZHbavJ2ZFbaNfeNwZIxQLTJX1QHgHR6kOM2NfQ4IJe
+25kX9doEYh7w7uM6onaJ00TSDIRj1OUPHO/zx2piyexA5uOZCjuip/xdAoGAE12Q
+yOITdFo2+z308fYNXrHjhdppUFloQcbea+4P8+3FPqVkxfuE1pG2wJMO1Hzv+anp
+UsU1fpSEDj9lILTBvSdL+qdaO/cptoPqQRf9lx2EFwOR1paUjz/At2g/gaRxB4Iu
+OJJvuTy0rPURLAtW4R1vMUfwNHLkQhp4Dl2VXGkCgYBKttzSEmn1cYblMQs8BOfp
+Wp/BsYY0atATvZsBE66F3r9/uCd/FWRjkh5ct4awAZaqCsk8SLDhXmlbfS+mJHZR
+QzTYI68Z0MGQvWr6pFAvCKagGDeQM9uTNJMgphRzjbHe869qCHVZMZ/EZGLpjjqj
+9JPmrPE/sTYezAlSWKtKrQ==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1 b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1
index bb098491fa902..fc9e8098cb9d7 100644
--- a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1
+++ b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1
@@ -1 +1 @@
-22BC5A014E9F2127C33A41C26E2044BBC2B53F99
\ No newline at end of file
+ED3B838A6EDE1C3ED7AC7A07EC1376D4672A9437
\ No newline at end of file
diff --git a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256 b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256
index 67573a0f95299..7e07216593ded 100644
--- a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256
+++ b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256
@@ -1 +1 @@
-D5CE39CBFEF9B028FEB2F7395E4BD62E58914EC4C8A2BDB134932F1B3E3B4633
\ No newline at end of file
+3F0D4C05487B274071B96350851F7C5F05583EA620744EF245BCA33202FB0283
\ No newline at end of file
diff --git a/jstests/libs/server_title_bar.pem b/jstests/libs/server_title_bar.pem
index 13d303c4fda5c..c5c5030ec72be 100644
--- a/jstests/libs/server_title_bar.pem
+++ b/jstests/libs/server_title_bar.pem
@@ -3,51 +3,51 @@
#
# Server certificate including the title attribute set to bar.
-----BEGIN CERTIFICATE-----
-MIIDijCCAnKgAwIBAgIEKf++izANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDijCCAnKgAwIBAgIEMqSE8DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjMwMzIyMDIzOTE2WhcNMjUwNjIzMDIzOTE2WjB6MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjB6MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG
c2VydmVyMQwwCgYDVQQMDANiYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQCmLpAVBx01DYjNf4ElBIZvtYm3JXsOAYa5sYhSXHzxMA7t6xBpzynrXqxt
-WDBwVlL+MEEoBi4lP7TBiBD9aZ/6agukeKliv7DuBZUSORIfu8aOsIXEe+U+F35q
-WvCNod8SpQrxvjvvLbQsJCD+zdrzzIVOCgYToAlDb0znu8fXxFQ2gOPbJEu60aX1
-ca6hPA8+rmbt5KfPJ+fIPV/onhaiMuUklTX7PlntMhYgGYANFAP6fVw2OIgeGXjn
-67z+ZD14EQ0reSfzqrKEbvqzrr8MJJ2wJYoYrT/Atu1JsLeudrb8ilmx26jHKa80
-OG3rxObOsg5z/0K2GsWR8AGXE0KzAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs
-aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAFWe3TFS1T/GwfM3jE2g7MJQZ
-30+p9HSfiC9kA3KPgyQE6lna17gyAS+YNaAQjC3pT1o3Dbanjs2Y2Ho/6JWAoeoj
-puzrkgCH9IHIKhR/+JX/XwX+yY6txNzwgRvkdVpQkHZ4dp4LBb9sNQ+RA5T3rAlR
-5g9/LwJbBGP0KSG2nxrsDEa3uYtm6HaqyjyNtCe6Hy9ez4qFq0fmKxnu2DnGgRwZ
-O8hxW4rc/c5JRp5q1EuocpEHZTqZ1SigtdA1nBe6cA60gEOqOFfA7DrN4cM5vyk3
-fkxPGQ+uP/6tTSF8DHIL4lE9X8clKMYc8UU47SYCzN6NIKLBQcaQHTCb5V5ZwA==
+AoIBAQDw8WgpAqCTkA2o9SESxrrP9rn49fNarF9z8P6CAxw8nwwaItJKj1ovY/vE
+Mes9pmnduVGq4/kG03DHtFCHBsqfFttcMAFD4NbhhepZQMAO5Jj82j7CqtFYqFKz
+5eRs4BooTKX6kyOEmhZJN6G0LIXFG1UgsGfL9SqQpC7KU/Bi8aUgfEYWTsaBtHVV
+4KU8vtz+0jNeyDF0Llr1OqiF5nwPFvfGNl5ZLy8oewfcPdaISSCLwesYaFrODrQQ
+nQmUenGswNZQSscDBbU6MuLTAjfnlmijlnRRA5CIBN1tNHujVAWyHKOIo7JHfqtf
+80ePNcgulwrYc1jy1ein1wLjDIANAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs
+aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAS3MosJziaHA2BT30FzQvCQMj
+BUajLeL4K/8gNIEesrP7qA1w9FXhZgw5zq6tmwaNsZ/M1efYh8KtmnddxzE6DbNL
+lfWZrX51sVKR1YSXhvgN021O+pBFSfuJw1QY38XhOHxqydfJI/qfdrZpherqzQvT
+3P9B3IfIAzp+3b58kqEOh3LCCabcsr59SgQQWoaKZo7QaVH/uP3p+TCpRS3Vq8QO
+V5uVTStgl+mjrsrqA0nmgZuij8cveARo/7W7oHcuSoS9FZ+yXz/ZIpFV4OkJ1MuP
+RAwJQRAO5jEEucpadJzD+bQUKBxxL3uBdE2DqHbbqn6d15vxbowu7zcuNzfCYg==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCmLpAVBx01DYjN
-f4ElBIZvtYm3JXsOAYa5sYhSXHzxMA7t6xBpzynrXqxtWDBwVlL+MEEoBi4lP7TB
-iBD9aZ/6agukeKliv7DuBZUSORIfu8aOsIXEe+U+F35qWvCNod8SpQrxvjvvLbQs
-JCD+zdrzzIVOCgYToAlDb0znu8fXxFQ2gOPbJEu60aX1ca6hPA8+rmbt5KfPJ+fI
-PV/onhaiMuUklTX7PlntMhYgGYANFAP6fVw2OIgeGXjn67z+ZD14EQ0reSfzqrKE
-bvqzrr8MJJ2wJYoYrT/Atu1JsLeudrb8ilmx26jHKa80OG3rxObOsg5z/0K2GsWR
-8AGXE0KzAgMBAAECggEAXNBmwofNpULg5D1RaNZlK2EOAI9bchAiKfZgt/dWBPMd
-c341FZORyxZ+YTe/Hg7onXVf/rWs8jrpfqm7K33hzt+JjxuhJzj+3YGap6neWIDs
-vecTXxD/kTVX8pjF/6SnzWcGfMwN92DkXz7yer2Ii1/wGAz7JdzdL5+rKUY0sGnd
-EVs+f3y46hJ36ejD/DM0Lj9zVMzbOlA/Kiuq+uHGrH3DZBiL5qvzecn+3HM9o8kh
-RzmtdllpsXq+P+MxFoea5OuIbq2vuNh4Cpg5PxEMbXhtPT0XwDf2NtN85CHL2glv
-zI0CqaJ/kNLLLorNrbtekXuLllZPJezPxefyXcby0QKBgQDZi0sOKIAmVXc0whdj
-MmsWpgtjs7S1NCgk09DiObSum/OWDMOYarFGdR/tDl30mzvpCbkk7QhNZcOZlNGX
-szfi6jBm7ejbDaEexJ2U7gU3GaeZ13AqIDukAV2ArMwR40S25JyZ+jZvgsiUXhjv
-nRFXFXaPMejYKPVX9CwLeXBvawKBgQDDju54KZYCKzPrZ0j+CbXTKVEC52Ch6G7S
-g3AAOMHoVXGhn26jD3Uietnq3KI7oSHHeNkqQYYdbFCkjMkF25Rp9xlFILoLZ0VA
-G6krXQ73z+BRPK9TPwzCaVxSXf+mxF4AIrGZbYsSZj+htm74opRk6+q3YrGzI9o1
-0ga84tez2QKBgQCCwT1wmhlEcTRAKrTh86j4KP9JgvcHvvyt/f5cKzEVjjjfpHZg
-AyjgX3+7/VmtryxYSnbU4f+Ofa8Ofatokdjyc655/19pYozIMIdCv7m0v5/EUQBi
-4ZLXZdasg6/4xHBFua0Cw6i6Z5Jl0xUL2I1WmVj0gpwgaKXmoqVilDBnVwKBgQCv
-NfqXErtiSg8ElM+jPFP6U4RP07qSlcvlNPo+WJvza8qZgl0AH7NVJzjj4rZAMsgv
-DimUYIynBArkw3bAltHMdyXe98l4uhgjriTNw1zLzyYt4u866Lyn2vpqmemaI0oN
-WQhCbREzdQUCAJBAmHnYSj9L+1M3K6IwonKC/cNBUQKBgA+Jgzck+Q+mD/+ZvC1R
-UWQmXG3IIMrpLRb+7eAanEDZX97sprY1E+Z05TbUuseR6IheED46JoviPtRYFRHV
-ZBYcuhOd/BdDF3u38U08EAQkqaZnBzHM+780IphFRr3o/wH2JSwyeilSg8q1/XxO
-VnZNKtdpmc5+EKlg3UhTb+T8
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDw8WgpAqCTkA2o
+9SESxrrP9rn49fNarF9z8P6CAxw8nwwaItJKj1ovY/vEMes9pmnduVGq4/kG03DH
+tFCHBsqfFttcMAFD4NbhhepZQMAO5Jj82j7CqtFYqFKz5eRs4BooTKX6kyOEmhZJ
+N6G0LIXFG1UgsGfL9SqQpC7KU/Bi8aUgfEYWTsaBtHVV4KU8vtz+0jNeyDF0Llr1
+OqiF5nwPFvfGNl5ZLy8oewfcPdaISSCLwesYaFrODrQQnQmUenGswNZQSscDBbU6
+MuLTAjfnlmijlnRRA5CIBN1tNHujVAWyHKOIo7JHfqtf80ePNcgulwrYc1jy1ein
+1wLjDIANAgMBAAECggEAUo0DePmTdrtmUrsZx8sa+mG4OhpadHiWg6zQJk9Wf1am
+2NWRY0ZWSeJhwkiwJoE7yGHUH07YZYRQIbwf0wN6rKMyKRWxeUYxmTc+obHTm8aq
+vAcydZP9Afk5zLU5XCw0Chaoz39WLfp1JETRPF/8vRmuQvLtvS2UldYKyfuUYvFp
+xh7dlAjCCnoNPTHmg+0EM8SDFinPdsPi0BJVHA6s94GYC1X/HpXwwk0bTXnV6u92
+KEaSdVeOV3K6DYd5ONzI2IWx6UPPe3GJEgvtv064TWcwBA151+sf8F0E1c3HEoN0
+gTp7k/GbuSBFV0syTskBTu9sluwroi0JNBOtqQKtwQKBgQD7EtUV3PITjO24s6Kb
+32Ngj6DDbTeUjo0dYmx6CoFAQxbMclEnwd+roZBya7ZXcRsa4bA1JCtRFi+7e+uq
+WVnvRD/KqIfPNGwOKYmsoiuryZh1u70bAWHwA4XS1oB3RNv9qxV7HZ1n2Ks2c9nc
+3jpFRsnLIiX8DGJtJpfdh0Q6KQKBgQD1q7AJ/1OlTm7zpCUMd76sj5BmCi5W+p2B
+epORB0/hCsqcsIRvhX1x4BjgbeTg1a0b5kIz3l74oMKcXa02o6buecJYlDNUHmVu
+BNC1F/SguppmtCmLmMUKWSlEyxKi1z73/V8S6ScVhgQQzYnKWL7ynS9H5Oazf7g8
+8iFS3SKbRQKBgBRYt1n5OMjqz8z7Cb91//iwSwfrTujEEJ2D/2R4e3b4uN1Cml8N
+cBHZmvoazoERkm/AQGZeEwCUOrQH29ZHPWmNb1n+BFt/VQ5kwVpzHfTi9m78Etwg
+ZY5syqiGZ4zC0M0i0y/R+5KR1XYpN0uV2sUIcov6rXdajZNo4D+8mYPZAoGAbkK+
+Yd5MxnaraYKUdkIQPBa7MnEDV1pn8sL3FDJ58YEMasL2za7WHJP91ky8WwTMCyeP
+MwRtD53nepK4uTs+Iu5XAbaSSwDReU2D9qNn0fOudMYLl3HbjcdytEOIe7mrs1Xu
+o+gs+IYLsRbu7vhhyzYtEFF2Eb4TDJeHyU8ixxkCgYAROhTkyV74n2K98JZ8GDNx
+BesAB2V8JblnB8d55sgl5yPDhJBYXegEACJ4WS+BohEnOoepBVfBgkqxPAS9xM6l
+vkfsk2/Gq2V8Po0MdU1SHktXLfD+GR1MkD9oTc6rYOkwDH4QBztG+ADyfwPH/siq
+q0BkYC1GSOAd/1+2sBi9UA==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_title_bar.pem.digest.sha1 b/jstests/libs/server_title_bar.pem.digest.sha1
index 18a85e869e48b..85abc3bec81c2 100644
--- a/jstests/libs/server_title_bar.pem.digest.sha1
+++ b/jstests/libs/server_title_bar.pem.digest.sha1
@@ -1 +1 @@
-31E9FDDBBAC424AA6377FF410698241361CCDC3F
\ No newline at end of file
+3051C6BF8FE8AE6D1FA2FE178324F6F886ECB1D0
\ No newline at end of file
diff --git a/jstests/libs/server_title_bar.pem.digest.sha256 b/jstests/libs/server_title_bar.pem.digest.sha256
index 8c5d7afe58cf2..9721586063045 100644
--- a/jstests/libs/server_title_bar.pem.digest.sha256
+++ b/jstests/libs/server_title_bar.pem.digest.sha256
@@ -1 +1 @@
-2F1C21FFC8FD92864E6E26AC4052087AE9D34133EA1507D22155170F72903237
\ No newline at end of file
+D6BB414C9AF5821D6D5CC046470232C56F03880AAEFCC7E249A20CBBC8D0FF8F
\ No newline at end of file
diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem b/jstests/libs/server_title_bar_no_o_ou_dc.pem
new file mode 100644
index 0000000000000..41ce7b42c9c34
--- /dev/null
+++ b/jstests/libs/server_title_bar_no_o_ou_dc.pem
@@ -0,0 +1,53 @@
+# Autogenerated file, do not edit.
+# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml server_title_bar_no_o_ou_dc.pem
+#
+# Server certificate including the title attribute set to bar without O, OU, or DC.
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIELGze1TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
+BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBXMQ8wDQYD
+VQQDDAZzZXJ2ZXIxDDAKBgNVBAwMA2JhcjELMAkGA1UEBhMCVVMxETAPBgNVBAgM
+CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAxNWomUaGmbq8tdEtG9QbpnXlo+RO12LclNrQIAbL
++jqg176WXpQN+OZ4ZzpEY5oYN5RuvjSVRExYOOUM8BTYDNAsY7Txb/X5IC9wWCZj
+FcNxWHFZJcaHcU3Nr5vhqd7emkOdQBnlrfeb4CwKeJ4wUt+SjWJ6VSFjtZ3FVjOO
+eVAl5EC5uaWMnraDGaxmO9DkTt4tDBAXDdmkFMYPnXK7xi2Z2WtVe+sKGxnlCUWk
+Agtn2SN8hqgWDgm3bJtvQjya6ty6c511A6FgJc9CfqbBce7e5OTLOW9MGz46ysRa
+h/EZZEG4u4ofQp2XRhUibuUCJuFTE/5+nioUqJM/EnWyMQIDAQABox4wHDAaBgNV
+HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBACwAwY9q
+GFkZW43DhMigmv6nSDBM/fLvvHyvPIO6eZCkoFv8tpHnWEe3th+ExvDeg0eLA+HM
+WF1jZKWx2cwJhGi+/ATyO6J10+D0wTWcKU3CN6z0WLpY3jM8ELalBjQLvUl09iAv
+cLMLZVM/xebCN3QKbO5/aVxadeaDtj4FVLDstak7fjNMP2IVjXyobEHqnk7oQLYY
+RSpEGyUCKQWOFFXOpb1BR3YY8Rc5xV/LzBe2vwoBnL/uxsqaE+ObzrzkyrjIWSzt
+CJ9evJ6x6Kw7zlgDDwz9XcEcEPDjOYjTzPnLekUFAGE8LJYXYjLh6HKpOv1WIyBN
+d+i2kGvKYxayuME=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDE1aiZRoaZury1
+0S0b1BumdeWj5E7XYtyU2tAgBsv6OqDXvpZelA345nhnOkRjmhg3lG6+NJVETFg4
+5QzwFNgM0CxjtPFv9fkgL3BYJmMVw3FYcVklxodxTc2vm+Gp3t6aQ51AGeWt95vg
+LAp4njBS35KNYnpVIWO1ncVWM455UCXkQLm5pYyetoMZrGY70ORO3i0MEBcN2aQU
+xg+dcrvGLZnZa1V76wobGeUJRaQCC2fZI3yGqBYOCbdsm29CPJrq3LpznXUDoWAl
+z0J+psFx7t7k5Ms5b0wbPjrKxFqH8RlkQbi7ih9CnZdGFSJu5QIm4VMT/n6eKhSo
+kz8SdbIxAgMBAAECggEBAJbjbsZo8P/hzY3XVywREk0t7acQenvUNmIJxyf17eHZ
+lbktSdogxwE5s6z2Vry/wLbCm45FgvODTtH2jE9yuxg/cPfSGo8IUTyiAQ4iBy0E
+0NvKsFsr8GIkEXwAFCKDTcOV93LPJ2mP+bcEK9bA9SxiZNbrWfnuiaAM3Nyy0xIc
+zWI3nDwB1U5cfaedJz1gPiBqZvXH6la0jSFdXVeNa6HapV20o9ABgFuwN8JrMt9w
+LyD+SxM5Visnt8LfXq71t+op2suriUz5/lnuz9HEs19f6JCIpjKzUMqxDsuaEpyt
+aofOqy+fuN0fGtnJyHVliiFUvbr4+IpljpyZ/0mt7p0CgYEA5UoEIRQxIeZZATpl
+f4GgYWU7OF/34JRHSDjRgJf9n0FqHMlxbmwnRzWW9P2cUif2+1oESj49kBjm4Yq/
+7qryU6Wk8qKb9dpazDxcGfSi6wfnm814h0NrQzfrtxUKqUeIzAX4MGyty/Z4qATt
+EoNQeBLk09bBg/BguS52wU4RcW8CgYEA28PEJ0KQMlyDIipPr9mBPoKudbVRfgej
+iC/GryEfd84/GPY5B6UZ4YPpZLuHDNAXj1G85/2pMpsKZGD/hpJTL6xNzdoisziG
+hCgUXYkzvv6E4VMBUxfg+BZtYu0dnJxENW7U7xnYlnleRqfQr5n6hqY8rSRonZ2B
+ZtbnqlHsBl8CgYAjkSWu0+kDLy4jSKGx87OjKAbVqd1TkuyzwUyOPAV1jLofGQhl
+bpVxMFjoS+bRv61KNB5yPdFi5tn5poI7TX0liI5brOmwa1ymTb6jsBUNjD5bJrZJ
+lSWwWZ5Q2gy7UYNg0pQ7PsB+4Q66UXlM4925ooSob6m5A7KdRx3YwnlmNQKBgQCf
+J2mZHSWcs07soUPQkG5/PNjnugZohVOzPxZeolhNxXoAs+CdXr4dihCuuPiXGOTX
+EFUElbnBQnMGOxES/klsTxaRrBZRQPB6KSSGkOR+v4iwPhLJgJOWV3ekmyBE2Q60
+u61N86PdZcwW01XYtmSuuxlaMvHoo7ahKaXgvS93dQKBgHKYStfXREIo54ONTgmL
+EHZbFzPCKEzXot2lLaee4L71SwyvawxvRzsC8QgMuG33hbqzechierdfODlrPXMQ
+nWbjByfapIYg7EJIk3xSo3xn9NEJvFgYVKO1BEL5m0VRCf7n3HiTZ3+PT7OZtpJZ
+GmQ1aBdFgcyau+K6GqCmX4ke
+-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1
new file mode 100644
index 0000000000000..b9d351aaee16d
--- /dev/null
+++ b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1
@@ -0,0 +1 @@
+AA09D3F3FACEDFFA86CB4216B4237F0A4B15D763
\ No newline at end of file
diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256
new file mode 100644
index 0000000000000..93c14dab77628
--- /dev/null
+++ b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256
@@ -0,0 +1 @@
+3646F2B1B24E8AEF7C0C305DD57A384A3B5A8E43DE705557FAE0682092E2F381
\ No newline at end of file
diff --git a/jstests/libs/server_title_foo.pem b/jstests/libs/server_title_foo.pem
index af938c1c7a8be..9a953d666c23c 100644
--- a/jstests/libs/server_title_foo.pem
+++ b/jstests/libs/server_title_foo.pem
@@ -3,51 +3,51 @@
#
# Server certificate including the title attribute set to foo.
-----BEGIN CERTIFICATE-----
-MIIDijCCAnKgAwIBAgIELeq5MTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDijCCAnKgAwIBAgIEMDdisTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODEyWhcNMjUwNjIzMDIzODEyWjB6MQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjB6MQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG
c2VydmVyMQwwCgYDVQQMDANmb28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
-AoIBAQC4Z+4f6WnJJzMqxxkShigpyObbCx0EElyzuSfECotm523C2jpVgplVh5Pn
-eTL6eIUwwNN2d4XHR0VAvvU+tBS+MB42NrZt6MSh+tWCm/HN21/4zg48hdedGFwH
-wDLTN94kRiaChkZ5aNzVqtLa+PtKX6UEYLvIHt+I7Y95hSvc1t1MSaobaEvLRjbU
-fzihRGYYOXeLB0Yw3zurWi7wJ1Z9D8bIYikzgMkn1sPBPTmYHiqQIlxeDmQ5xmNJ
-uRSjK6t16r8SVeNCTS85/pmWuy7hN7YnZXsdGXhP88sZxZOqdjEpsJsj5zGN0Ki0
-KC9NYasht7tZ8dMGmuPjsvo0dwyzAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs
-aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEALbGw66c/ZoiuKT2u2i4dTjpV
-L9xceahK9DWGV3syddTPkloER7vpyZzES6TrkC0Kw/3OMnSDaIy1hR3Gp9zCWhDX
-UQLrqh+rnYMEPucG6oWxjPUovfmkWU0zdsTuiXmdJ0eWW/OLe1NPmt6WHlCG2cUl
-BRJR23v2KfRfCL9YaOyLynsY49TXjEELyKD67csA3M6sYKbJ/pseM1TwDqB0Odyz
-CSKDGQx98UsWGS2skuuhPgic8pgJITdp/WfUuI6JyvjpWRuxrHZykJSo38WhS6RG
-rTyj35fDoapyFiJscx0dVrFkTrvptTlLRRxeIDzbZ40wR+EadnJ2/5DB0Nbu0A==
+AoIBAQCue7oeuWmdipFRnsOjXFYpKXTFV+arFNHKynfGdqLyU94ZiG7K7J4SgN+L
+S3CKvEp5lC8/aztLj+EtPXVGj5lC+yVmPPQ0PYxr0G9ufIdZeqo3sLUVOMBovuOe
+fqdc4BOBoKs7zBh7TLr7DBPp17uROTkla8VB28p8jFeKcWI+ImjCLGy71gqEXzV1
++3lMvmSlm8rQq1hGJhOeCxPmHo5o0GlnZUhh4b+ts0clTQU2HZ1EEktFgofz0lT/
+q+2hOdIpYrecUQ97iLFzF/hMrIdSuzia4mDuFAvFXj723N3J3xwoXaGVY46Qe5EE
+oMR+Q+PzjyN4lq3eqsJUtKAgsylHAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs
+aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAUox64GeSoepqlF19g4qfkVu3
++ld6tsInEMzPq+gqz4PsjL8e4C7z/JzQQnrGfk2sD3rUUWOiMK3V+Nx+ggCrHPth
+ctRcZXCmD9TxW+zPkji3TLLq4f0cVV6OkDS0eDXIWSi0Fwb+PyhfMcsR7WMTs5G4
+uQeju/3EYx/IVU5WWrMJLcOS6dUV9A9HnVw7y3yYAbO354t9i8KLBFtgCb6O6r9A
+lqGAAEujfpzIRNnvq9TbxCVVlYia01BkJNwj1EPP6bYEvQUnYaokpTqNWYNOkt3D
+XQj6MEQtXhxAZkaHcEFK/LFniY56KB9RDoXfsMFZRXWc/n+SGki8eMLO5YWEMQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC4Z+4f6WnJJzMq
-xxkShigpyObbCx0EElyzuSfECotm523C2jpVgplVh5PneTL6eIUwwNN2d4XHR0VA
-vvU+tBS+MB42NrZt6MSh+tWCm/HN21/4zg48hdedGFwHwDLTN94kRiaChkZ5aNzV
-qtLa+PtKX6UEYLvIHt+I7Y95hSvc1t1MSaobaEvLRjbUfzihRGYYOXeLB0Yw3zur
-Wi7wJ1Z9D8bIYikzgMkn1sPBPTmYHiqQIlxeDmQ5xmNJuRSjK6t16r8SVeNCTS85
-/pmWuy7hN7YnZXsdGXhP88sZxZOqdjEpsJsj5zGN0Ki0KC9NYasht7tZ8dMGmuPj
-svo0dwyzAgMBAAECggEAGxL3PSwx4dylgIRWxAd6Yhgi/Mn26qAfiCuJERlTOjqE
-PPV5VxCjnpEXQAblWyzSsUO+SEhoFcf6/PSMYTZjTUEXTnJd+mkQZY/ERTbMG6M3
-xfnK0Uv9Sg1HhcPMMoKjVMQP5137ftvMgHpiFtAzZMoCGlBxgYI2442tYPQSaovJ
-DqjPwz+Mn4PEskR0/xamhW+/dStbl2xaG9URPD5Mf2ZhWl8milMJC02Y/Ytm7igq
-AENT06qMcaBtTQZrQubCmWHN+m/cHdGHLlsg5UN4SwsY5OaNrWqMFqrv8ouZEyC0
-4n5+X0kcU9FtXN6LSlLrlANdnAKKY9Sz5NNynlASYQKBgQD1jC33t4GYzqas+hpa
-WGZfLu8aFdcyN5d2sXyMcPKuzULkytyn2GvfGkzcr4ngrewwM4EpotBxFRWN4CUl
-cABzbehwFi0FM9PE/Ww25TALkBbtGmWxSmNuK9uIMwyCClas4T4fV8BS3pCsBC/p
-Jp1QibOvRxtwTr8NRtWEPI9puwKBgQDAQXf0Q6xiBiM7Jwp7UtPEgEwY5aetTuYj
-lLuasXMbAPpAhuZhBGlgpu4Xg6s/HnkQbuYoYcidoMGtTjwTIrnWtDC507kpdzQp
-DkUJPBijiu6OMvQepJIilf45fyHnyDJ1q881PrrOzYikdHth9Ti61BD80YsHFAuF
-51NJHhedaQKBgFTJsN3G6eNACGHWgt8Lg13+sOWLASH//DcqFl3QapxdmGm0evki
-TC1fwYa6vptssw/52PHtnJhPtX+mFG2W/TDelNKPdcBEIy30bDeQcESt3pzE7rSH
-gUn7rvSa3AjTVRahOHhOLsTuwXoEgB68DLpQslEl9p2TM3l8KiJdXxAdAoGAebT4
-SxnMNwHLq9a7O/bjOLI/ekNoMr6P0laFrRhI1f94bQD5NtGkJBuI/jnMXtjbqxuJ
-eGbuqVrrQNsWDMce/lxzvC/cN/POgW8XJRF2R5HcEwkOoZdtK5foqF1jCWgjCXsK
-YZqkh+Z1aiaTNSAYGa3GU0YTzRdTdCFNCCyUpoECgYAXQe6DhbcgRMv23eqml2Bk
-dtcK8q8SJJ0t/onWItcI6CDo91nZSTkQ0A5aGjhUgTrBSnJFmbYjeyuOfqcVxDc6
-+I2Yn7ybBjpmZFQciOd0T2a79aZWKicjCM1PjJTQN4ghIt6/f/HtmcHoH4OBXfIp
-zHWq9QZ48v0gu+1NZTx8xg==
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCue7oeuWmdipFR
+nsOjXFYpKXTFV+arFNHKynfGdqLyU94ZiG7K7J4SgN+LS3CKvEp5lC8/aztLj+Et
+PXVGj5lC+yVmPPQ0PYxr0G9ufIdZeqo3sLUVOMBovuOefqdc4BOBoKs7zBh7TLr7
+DBPp17uROTkla8VB28p8jFeKcWI+ImjCLGy71gqEXzV1+3lMvmSlm8rQq1hGJhOe
+CxPmHo5o0GlnZUhh4b+ts0clTQU2HZ1EEktFgofz0lT/q+2hOdIpYrecUQ97iLFz
+F/hMrIdSuzia4mDuFAvFXj723N3J3xwoXaGVY46Qe5EEoMR+Q+PzjyN4lq3eqsJU
+tKAgsylHAgMBAAECggEAePY5r7cmqxs3Z3hDdOs8qEifNXic55oFXytCIgp5KLpP
+zS+yb2fepkFFuJHSMbwbfXHn13WFo/rZRr3GoeZHgBIQeXJkchsBUHUt61DVnIjX
+nMgaod24NmWv818ms9J8c29phokVp9UoA4b5/zGaTTF4lqLCG8g99l5tCgNU8nFP
+Cfx/xWuRbxhcLYnrfrctwvG5yS1PcjIkaJb1fAlbKQVMBnJ162tu3RE8zGmR0HJH
+08Trs9eS61ZrGOLJObqPLkwuy8VSsyRDPaSJj8EQ+A2uCCbjbITySfJtOvtW7rXz
+xgxHBbeFtyFURa7lkcfn52MzpXjHCuqeHYH+ZJWzgQKBgQDdPKyW9B1lma8aGyhw
+wce/Xr59cSYA05osBsYiMu93+zxPEspl+fAZg8yjbeMl4I62CANUtwIJCeHw9HeY
+hb4Om9yVVFFp/FlWkPlqdXvAUKGMhO0N+vq6QDN07Q1BVUUUHJZ+gjhrnNvuY2SU
+Fj7VB9L9k/YCSj0QgYWQ4UaIIQKBgQDJ5l7/kQCUzznulU91/BQt4UyL7x3jRvLd
+8L/d82yfEy/Bx4ZFar4I9+B54FRepB1SDdb1bOmwPqnvGNc3Hg3UKJzhSuqpKBpv
++sQfwy8yFLDNMgB7mb6B7VtuZsFYT5yNj+4lA2Cfcuy1PZwX4YatW2Toe+bLft9k
+2zfT1XXkZwKBgDwrZEUp31Til1ziRf1kto61ldlIDK3s3uFadkiW0cov6hcXZTSW
+5VYLInzQRhbnG+kmBMHlhAgxR+HgmyZAOZB/k16JsBrDJwkEJNFvYljLlSRCgrZq
+rAY7r8L9Nb2vEzqFC+kcQXwDDU2oepJL+oq1tgyBUUcOKc1zbIAaxLQBAoGBAKTZ
+9AFKXSYkGdJDmbDlVXist/qeEFJN0OoEtDS+mJc+bEUV6/1sDaR2+JOq5lisOcCQ
+yk50Uk70q34tUzSO1o2/Z3DQ4c+ijguWvmKM1VFX8ZBp3lkNjK67pmb7gazgvBwe
+RD12h4NJrBrEJlqda4DK2ha1bBoGCtNn5yqQ5YTLAoGAK/tGX8AXlStM8lnezsTM
+wRj3kK05ptTOnDWHR4nKJLYXczSo/PJug9/Qjbox+XWzfTlzSThqVkmuN50iNavf
+8N2EzLTD8fhXNBTzzCg5L4oLwdhlGX5mrwXRwTYG7cCFawWeMU3h6HrjRpqD7VE6
+kbmNTsIi1wxt0ymamDF5yuc=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_title_foo.pem.digest.sha1 b/jstests/libs/server_title_foo.pem.digest.sha1
index 1c08ffd82a130..77dc1d452d666 100644
--- a/jstests/libs/server_title_foo.pem.digest.sha1
+++ b/jstests/libs/server_title_foo.pem.digest.sha1
@@ -1 +1 @@
-AE9780F50789327BB1F6AD5343490CC2FDF559FD
\ No newline at end of file
+E5836708C648A0532CC95AC1C55A01E010243EAF
\ No newline at end of file
diff --git a/jstests/libs/server_title_foo.pem.digest.sha256 b/jstests/libs/server_title_foo.pem.digest.sha256
index 1b7bfd18f16f8..267623019f628 100644
--- a/jstests/libs/server_title_foo.pem.digest.sha256
+++ b/jstests/libs/server_title_foo.pem.digest.sha256
@@ -1 +1 @@
-C2D4EE231C2704118F01DCD559987464EFDE8939873595386A8772B6274C70A1
\ No newline at end of file
+CAE1806485306B46661711B44D7AEFD232B474C1AB441B33F1AFB60B2912850E
\ No newline at end of file
diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem b/jstests/libs/server_title_foo_no_o_ou_dc.pem
index 87e5d8964edac..b3bcd67c4efb6 100644
--- a/jstests/libs/server_title_foo_no_o_ou_dc.pem
+++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem
@@ -3,51 +3,51 @@
#
# Server certificate including the title attribute set to foo without O, OU, or DC.
-----BEGIN CERTIFICATE-----
-MIIDZzCCAk+gAwIBAgIEPUtD4TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIIDZzCCAk+gAwIBAgIEQFQqZDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODQxWhcNMjUwNjIzMDIzODQxWjBXMQ8wDQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBXMQ8wDQYD
VQQDDAZzZXJ2ZXIxDDAKBgNVBAwMA2ZvbzELMAkGA1UEBhMCVVMxETAPBgNVBAgM
CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIBIjANBgkqhkiG9w0B
-AQEFAAOCAQ8AMIIBCgKCAQEA/z49ZcRbY9ZWekDxYgPqwlNffxfXWVtKibaO/FtY
-vI2Ey6ngyqTGCvZrJ1MWvKxaKoILrPIhjxGcREW/FQNb2TG/6kpnhbUeoYe0zy1w
-/hxZv9mkSe3xmkxw0V4RmzmKfaxeGcsq5S8eNJ9SVX1CRLgyindO+bwkikzMdL7f
-5VlVx2ry3t1Jnn1ncRAGBV+PgtoVqQgK5IYFONVcOsoaxikSzr5q6WW1NwrUNhOs
-F/76LoTFvu14o/QmzxiXsSMLmdo9f/Ejimf1THOMEahmD2KFUnx0F3EzcY6dholF
-mE1pEmytTN9LlnMK/xt2CsuOtjn7NHznX17GBSuF7LzX3QIDAQABox4wHDAaBgNV
-HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAHPVIMCg
-kmfyKl7Ci5uJS1OCGAdjpaqoSlC2jz7xWOe8P2Hz9gluQNu38EyG9EHM4G1jktCV
-T3KyfaEcQw/4bgz1QlMEio1xPSEsqwMswAzb4cDPbxI3MEyLkx4mIcYZXG614rlm
-ZX6A4UzZ7dIXRPoETnEy6CUDiOBVmlrGfVqv6lqtx63yUSbDKwoF8HVpJxpSjgQt
-qY6AWKHqohmUImwludPlmjxJLh49yJyOMvXHRPr+BMPM/UYKVJ9mx4YmLJxMZMz4
-GSzPKqiJRNczvT1T1qdInUfYa5DtTxYS7NK2ZfvDqtjllTszoUp18shg3P5+tGJa
-2zKmcCXWNlcqkqY=
+AQEFAAOCAQ8AMIIBCgKCAQEA3EfHW/98lze4NL9FsW/3S9ZDGKo/BV+PGJ8sFQ3b
+dBmynH76ZT2YUY7NB1T/kbyBFoyq975VDS9q5PVLgbA7JefzWBKAU5ytxYJsq5B1
+p8UadrljjbjJ5m8DPQeVdz+i/uv22GJ//PGUTj2oih3NMW7bbRI+OQEhzTmuG18o
+oZVJImHJOt49mNCvt/ShZj7EQMWqTaOijHevb7YkraH5EAX/sA3Af0QZPrrLCBu1
+wjTDiN0cvdCdwv6SnduEWqDz3pf9pjAckGIV186PtlvOAmYu+7v/WNNAWqZC4EtR
+wopr6PCzSE17uv8blJX494rHmzDDGm/UJH+yyEPyf3HKeQIDAQABox4wHDAaBgNV
+HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAGusSCgq
+nogJ2c9qYqSk3rxSTxkPhMXMxSZVJxu6KT/tS4bmxQJ0F/RnHuGNf76DwFjs46NL
+dBes3AHJ+n/LJtaAc6/gsPHJYyM+mUhfZB+Uwzf+YR/9Lg7WXju0wXAWK6qCOz8L
+MoalrKw7hkLCaIryrdC+YLx0WE3/97ZW/Mi6QUNyv0iCcgc9rPvRlCzxh2riARWz
+LnRO1UbN95vEvZuTrAflV0n5Dox2MqzUfY0mZhCAHIHnP8n1+LJzI4NE2xbZMz63
+R+2QHzdDDOj8+7Vj5TmBa+LJ6VXPemdvXpD8wcQSl4gwXBVFNZhCmxr7x7G4GwIj
+mcwn0Qmq1NtYXik=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD/Pj1lxFtj1lZ6
-QPFiA+rCU19/F9dZW0qJto78W1i8jYTLqeDKpMYK9msnUxa8rFoqggus8iGPEZxE
-Rb8VA1vZMb/qSmeFtR6hh7TPLXD+HFm/2aRJ7fGaTHDRXhGbOYp9rF4ZyyrlLx40
-n1JVfUJEuDKKd075vCSKTMx0vt/lWVXHavLe3UmefWdxEAYFX4+C2hWpCArkhgU4
-1Vw6yhrGKRLOvmrpZbU3CtQ2E6wX/vouhMW+7Xij9CbPGJexIwuZ2j1/8SOKZ/VM
-c4wRqGYPYoVSfHQXcTNxjp2GiUWYTWkSbK1M30uWcwr/G3YKy462Ofs0fOdfXsYF
-K4XsvNfdAgMBAAECggEAKCBhyKDw+SYWHEwfZphVDM3Moo9d9JdMhY/ktLmrnqDk
-8pu3UkRLOif5OopudaTm2+3r5fl+2x4aogURAD2x79hJYozl73hE44IRI8zyCZDt
-byLJGDJHHEnOJqwSOoP2SMGTXZy6FqOsrPsrF3OEuob2sxwEl3BDklZ2ghgL3OM5
-IIVycNo7tEGjrH1p7Z0+5Uuwf3lNZxlItc17bOTRwAi9eVlIMyoLz/ocaJFt5C3Y
-KgCzkQcvWjjJEVwlMe10u9yyjs51yKAqkBfREOVYrYcAQigH1QSKpeLqbVDULMcl
-5CM0e9y1ZDZAeOsRFqCdFMYHVBB/PdlMxP2eM+12JQKBgQD/2SZUC75jF29ekgmx
-FEePQ+LQAlnP2Hplo1TUp6mOIB4n75B/GXbhvh/Aw/bzkeOPSKpZMbOUDcHGDwNu
-ul63BeZWC7hBV53/rJdLEbknafZo3Aw427foNhRscA6iyb8z+QzsgElRuQ2Po/qF
-0vXYxBI48V9ANkEUxCnBjhs8xwKBgQD/ZP+L29//osaTdxyiVwMqmMxw1NOg72eJ
-pE5h5anJ+Zdj+XlE1BOGnpz/J1OMCEmspa0py0zqlvBTuhB5l1AamgeSlqjt8u7a
-T56ariCmwkDCHVRUDevXXAmzdHgp9c5SPNp1Ka7qj5vhTK3YpVGUtzcWRJxqWeCg
-rHYL/Yl6OwKBgHF1U/j7iD+bWekfbCraKm3PFhtWn4t7nbPK/cicXaXIencNVw/2
-M/EiBiTPAom7TaXx/JE3aEKk4yS47bXB8lTJyf6ojdp0R33lhOZmgqyG4h5YTxc7
-4M+ag+4et27bdu5OaLvMnDcgkHH9rxB/oESzlr0n1Sy9opjZ8QaDxXJrAoGAAfDQ
-iE2JbDXecGxtSUaD/aTfmNPlL8nh7YfUGKZYHfLJlbbllwJNi65U3xN7bQr7FFbF
-9BVZZkbzWI+HZIUj1K/q8tA2RGieLAaC3AYKtXmwaEk0xNa+PgqzACwYZak6giF4
-P3+rlpi0xIeCoqzO6+RghMjMr3ozXMUyuHCaxNUCgYEAkas1e5PagZ1u5AjpXtN3
-SI5Wc7IwwtzJf3PCsT3ijYifo1NGG98xM5jJhr+6Sw9QYuocJ1+dY1iHKVdxsBAK
-WN+jJqncuF1EMEDLJpCk//ecLygG4aXnVuT+HGe38+X1SWzpTshP0wmZQeixZOtv
-gRcYsGOG1GGQc7R4PrXooY0=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDcR8db/3yXN7g0
+v0Wxb/dL1kMYqj8FX48YnywVDdt0GbKcfvplPZhRjs0HVP+RvIEWjKr3vlUNL2rk
+9UuBsDsl5/NYEoBTnK3FgmyrkHWnxRp2uWONuMnmbwM9B5V3P6L+6/bYYn/88ZRO
+PaiKHc0xbtttEj45ASHNOa4bXyihlUkiYck63j2Y0K+39KFmPsRAxapNo6KMd69v
+tiStofkQBf+wDcB/RBk+ussIG7XCNMOI3Ry90J3C/pKd24RaoPPel/2mMByQYhXX
+zo+2W84CZi77u/9Y00BapkLgS1HCimvo8LNITXu6/xuUlfj3isebMMMab9Qkf7LI
+Q/J/ccp5AgMBAAECggEAPLnhAMCvBTUMKCZuvRrDzvlTZ/JPC1krprsCuQ8n55rJ
+/ZCX0N8pJZq7dQD+r5uK7r96xm+HHOpsipAA79C4jZujHy2CiQpRcXiiIq3hcY3J
+3N8AzeoaZtdFykZT8xrtAV/lngORixbCLsuasfDDR5RrE2eLFcX1HpF/KPaTye9N
+1cswVQ7RAoUY90+n1PSoDOTbmG6Sw0EEM/gmvHIvN/KQnXhrbHLcgzSPvPauVWhB
+Mddd1qifPvEItOrtEz9E7Oksu42v27Nq1baVC2HYYDYe3jp9Ef4tQN0chlKFbkVU
+XDDFdHpbjSkC1w9WnmVLCJqj4NrTNbkEURmYBf44AQKBgQDxiq+slTBep8x/dIfi
+/FVn7IA9ENYudDcjFWMr3kkKcldoJw92kh33RHIto4gqRLiz3E8bByrXd9S2dFco
+q+e8bXlK7D13z1/Thg8i9m5ogBEzXRwMNMM0S03SoBOGoEghWIQfacxD5YVBNtDP
+xPYO0TrbKfYo/WiWug/qONnveQKBgQDpd0ow7zPXo7EmA1dx7q/VHb+TKkQ4JWeQ
+hNPY9nEuSq4lC5QpLrxF6RhTdpgPjGkxcEURFkVWpO/3717k5bXbIAptOpnLi2M6
+NmiuxA1dxZLCvQcHTBaE4e1XDSaGPqhxvEM8cJ+DFKZUu6BTKD0MlrM0FEHlhfAD
+85gfsIjzAQKBgFPeeAlQ9C6pzRQkflqOi4k+UXjis7wFlm+UKY8969a4xSjhBzmA
+mu8U0SCPu+QRYKDzSnR0FKgkb7O6ydjRd/GxgYBGb0F+vCNSVUcqkkOu1Eoldmu2
+lmE+FJOiCaWhWepaNeZCci7RL2fphK/gECAs/mbDNzocY4iSqpwFiYbhAoGAJmYL
+Ws8M7MusiD8Gc+O5Ick5yB+shruINBnUqhumc+GukMM9xCQ+rRTwflHEItKKPqpj
+gbLzBpQsL9A8AFPTvE2hyWNZBkRPtrRaNVxjgmfLgqIKdOXL0mFCYw3zpyLJG6PG
+Pzua0Lllvgv6C5NTry1eHhOy3uhPmKbI/3VOoQECgYBm6ZixNIVsHCugR+fzVJWb
+oOXdxExOYPGKP4dIvcmd8/CK9W9gFHPPmLOEslhOgCIjzZkJSPSB1lk2Ry3c2rqd
+XXYmWCZYm6gAmO6J4EVW/X22SIqkNHDPymRjN3VHIksmQ35dvQW4rCg2IOx68sHD
+rEP0oSLjSj+Q+e3h7I7nNw==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1
index be0670f8b8f85..71f399192326c 100644
--- a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1
+++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1
@@ -1 +1 @@
-0F500F3768A87910EAD0571578AB10A9E39F2122
\ No newline at end of file
+F8786CC78C8A2D4F2390806C822BAF7EB6BDF0DE
\ No newline at end of file
diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256
index 38a0951e2a4b4..632a3a72ad0f2 100644
--- a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256
+++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256
@@ -1 +1 @@
-602B89632680A18CC323E067301487BB97A7F49CA9180ED116CC75AE06B2DA94
\ No newline at end of file
+79B89A23F11220BBD9119724BD171A7BE63DC9D9488032691858F9B0F1C0F2E2
\ No newline at end of file
diff --git a/jstests/libs/set_try_bonsai_experimental.js b/jstests/libs/set_try_bonsai_experimental.js
new file mode 100644
index 0000000000000..8962207469dd3
--- /dev/null
+++ b/jstests/libs/set_try_bonsai_experimental.js
@@ -0,0 +1,27 @@
+/**
+ * Set internalQueryFrameworkControl to tryBonsaiExperimental and
+ * internalQueryCardinalityEstimatorMode to sampling. This is intended for tasks that should use
+ * experimental Bonsai behavior, which is currently defined by both the control knob and the CE
+ * mode, regardless of how the variant running the task is configured. This is needed because the
+ * suite definition cannot override a knob that is also defined by the variant.
+ */
+(function() {
+'use strict';
+
+if (typeof db !== "undefined") {
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryFrameworkControl: "tryBonsaiExperimental",
+ internalQueryCardinalityEstimatorMode: "sampling"
+ }));
+}
+
+if (typeof TestData !== "undefined" && TestData.hasOwnProperty("setParameters") &&
+ TestData.hasOwnProperty("setParametersMongos")) {
+ TestData["setParameters"]["internalQueryFrameworkControl"] = "tryBonsaiExperimental";
+ TestData["setParametersMongos"]["internalQueryFrameworkControl"] = "tryBonsaiExperimental";
+
+ TestData["setParameters"]["internalQueryCardinalityEstimatorMode"] = "sampling";
+ TestData["setParametersMongos"]["internalQueryCardinalityEstimatorMode"] = "sampling";
+}
+})();
diff --git a/jstests/libs/smoke.pem b/jstests/libs/smoke.pem
index f8ecb28a0bf07..432d153517079 100644
--- a/jstests/libs/smoke.pem
+++ b/jstests/libs/smoke.pem
@@ -3,51 +3,51 @@
#
# A self-signed certificate used for smoke testing.
-----BEGIN CERTIFICATE-----
-MIIDZDCCAkygAwIBAgIEXcrf1zANBgkqhkiG9w0BAQsFADBrMQswCQYDVQQGEwJV
+MIIDZDCCAkygAwIBAgIEJATu0jANBgkqhkiG9w0BAQsFADBrMQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEOMAwGA1UEAwwFc21va2Uw
-HhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBrMQswCQYDVQQGEwJVUzER
+HhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBrMQswCQYDVQQGEwJVUzER
MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV
BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEOMAwGA1UEAwwFc21va2UwggEi
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPxycX8hni7V8evBZrKWm7b1oT
-OYRz9JBgwQMdZV2TlWpJJLehFESl2ENWRVs9ZiD6TylofnKHaxaIAkhqgNc1Vk2h
-ABj2LcqG7gukcH6++PZLTWS58GmGVunrvCFzX6jkKEa8PFBHqDf/KcPqN6ALHUGY
-jb9md25dMD3NudMLsfZDEA8AXAE/G8UDWbN1RxLUWAxJDotzAoF3dKr7s5ruo9NN
-urSb8SErjmO1AuSArZwYwTcVO98JPUCOQfHyhu0kgi9EN/kKqBFaFYCZAUqd8lKD
-VR0GY3uiYjYXYRcbuvQihQ3H6gnMbAKCS4BR+KW3Uzty27qS36MhGjpd60CnAgMB
-AAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMwm26DbGdss
-sxgGTGXa4ogqOMe6zjEXib00auRGxJ/DvjDe7CJqEetMaBVnuUVr1neCh0/i35jG
-Cbrzs0fMwYnElXbJk3cGuEB/IeFyVAeJ3y7LiI9+o3AoIK8/Kzipn0W3Z/7ohVlt
-AnA4amQSZFIhpxk/oFks7hFnMjEiS4V+iBOUj6GhY/D3th9rpZWC8rOqTrEYvAqm
-BpYDKbU0j4tS8Z6fDOS7042/e1ocYieA4hBDOuA5jifOlkcWfoUjzNBg5HSbpQoT
-+RbhMZnzP7LdARXm3XvOAkgo5pBji1zTZmO4H1IGEJMcNNthTR20F9yeZXJblJdT
-Gj/fOrDuX5o=
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY/pPCLzyXxRpPF4LIPuUGWWkG
+GTbXHCW6Si84T4qU6TfuMjghybQivuULF16fybTwx2Q2TByJIEEsi0G2c4STtFGr
+3090YxeS+BaAlRK4qi1Y8nqARDKdfSzSliXeXsU9U0eoNBgAPQCGgk43uqKmCSwB
+MesECWJqf01k+op4FXtyAKqtUpylSs2mhGPOvBz8UVKiz4Fo2Y69nIIjT2F5ceX5
+l5KE/5PQfNpFMAHdZm06JunkJ9yUYB2HLQtBcLWQDbqAQb1TptoWDWgvhxb0SGVy
+1D1UuUHx4EgbYfB293diOznXf9ZEgaa49QUA3VmR161jGSPQGCt2LUjSBhfZAgMB
+AAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG/Pc+ePHnVS
+bGb5PWJQIai3Xys1ZjCSjQ8Z7aunJ5HB12x7SXDG2rAglJlX3+pvN/qlqBNoPOMM
+VpxI1hvICHoS+YhgUxDiQy/+cJSsgo3eWWqSNES/J2xlNNkYqw1IcNqq26IMeZsA
+kG2WUx3y9C/9FMZGE2xLuBRd5CZuDhwI4CIS+k/7flOxTw+J+VmpmXKEC44T8iEg
+q+xadroj9uMT4IqKPJOkxh/9RnOaSv4FRYgpJtxJ6wtsIlAxXX5M/7/Eyl7Gi0fb
+dRcdHjAQo1X4EoG/ooetSfFkBYa9qrobYoJ3igKTllgrWzClGMaUkLvEb3ddpdVj
+1Ah+rEk1nLo=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPxycX8hni7V8e
-vBZrKWm7b1oTOYRz9JBgwQMdZV2TlWpJJLehFESl2ENWRVs9ZiD6TylofnKHaxaI
-AkhqgNc1Vk2hABj2LcqG7gukcH6++PZLTWS58GmGVunrvCFzX6jkKEa8PFBHqDf/
-KcPqN6ALHUGYjb9md25dMD3NudMLsfZDEA8AXAE/G8UDWbN1RxLUWAxJDotzAoF3
-dKr7s5ruo9NNurSb8SErjmO1AuSArZwYwTcVO98JPUCOQfHyhu0kgi9EN/kKqBFa
-FYCZAUqd8lKDVR0GY3uiYjYXYRcbuvQihQ3H6gnMbAKCS4BR+KW3Uzty27qS36Mh
-Gjpd60CnAgMBAAECggEAf21zU2M+bwcHPDE8SpKGbtaW1o2C5x6JYV1eTKv7HOUY
-3yzulZJ5m4Cro4A7uccl2H2uNpC+KsEJe3Zy9SZ5VhPM3j3SEUD6IZBigYIgqXzE
-iGA5szN3dFVfdxzVIw365Zqx7wXKcnf/h7UUm1NsCwTxUt80EGKUxAAvFzms7lEm
-GDSiNJh9Bh90fC946LbAIOw+JIrsIJzeUq7wQ3bHWxZfdMc4Hg8M+3znXV0VWlsv
-g9nZ3EKWofyvk5I86bwuuMVcHoFPtMH8U5Hl2jiPxjR1lmWvhcdcGyxrsmA8536Y
-1PA6FfbTuUtjreNKpcpv+pWLOrZIzEWqNYQxKbG2YQKBgQDziwznjWV0zqN7mUz3
-EIeo2UiVF34elfRZaJnRTdv//fUDOjRds96OkbLsp3frNYslGrI0UwPHHpEg+Lha
-JQFto/BizjvC1VaSQqJ6MDt+qPCZ4U6vTGM71jfp3xMQ65M6JU60tqQ2tQofGzlN
-QEW0AwNhmrJfnvRF6mswhioQkQKBgQDaZ8sJyTLUa8DpYxhDgO8Tg3s9VIZQHZa4
-afp0eAWJAVI04Bav1r8/58X0+//BkoSY0cIPIim0t9FuU2TQUUgr/RnXMAW9JApd
-KlOO46IY1ElLRGHkToBPearHiWEuX59E7Md6WH6VRt1TKzTe0ODf2eTFohBnUyMS
-sslQUCdZtwKBgQC5cB4STXJ7/z8xxGd7E8YHxfJjO3EXftyAG4pLeXpTMB6YJ4Bp
-/KQwcDYJxfYkTS+2v94Cw1b0DY05ysgsM1MkFZja0udkEacKVNx8Jy+V4LLaqFiO
-V56TmCgY1nchAg6nTnLgXNUqpqyfRM5byN2KFGVFs1GwR0r91WFqoC11UQKBgF4H
-J4OsCmUniuMv1YdiYAtfpNQvqq+dPYFhpwEzlvZ4CiNXgozrgGUL7M7fGyoEYW/F
-hq2rQJdcOB8uag3BoLfHfLOHKkFW3dtkWJsA542W/4MXCqoXHeiZcev7+knTwycS
-ZYMHzF+KJckjpdxzwwy8q2BmuHczdLsdG8ym5XobAoGBAOfKIZgDttyDb7R4fEzo
-v57t3LrlIF54jQYWxqh+81YzqbQFfouuYkfOFa5a87buGgu2BGjGpZcOAnv/FFw1
-EnHPueBsOWIH0C12Pft/4+idwfVZdOq+QbzMt8rudtcnlwhJZQUrvNQjCymiKi7/
-acmbpzsrdmOQ3PB4+Bfd4E1Z
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDY/pPCLzyXxRpP
+F4LIPuUGWWkGGTbXHCW6Si84T4qU6TfuMjghybQivuULF16fybTwx2Q2TByJIEEs
+i0G2c4STtFGr3090YxeS+BaAlRK4qi1Y8nqARDKdfSzSliXeXsU9U0eoNBgAPQCG
+gk43uqKmCSwBMesECWJqf01k+op4FXtyAKqtUpylSs2mhGPOvBz8UVKiz4Fo2Y69
+nIIjT2F5ceX5l5KE/5PQfNpFMAHdZm06JunkJ9yUYB2HLQtBcLWQDbqAQb1TptoW
+DWgvhxb0SGVy1D1UuUHx4EgbYfB293diOznXf9ZEgaa49QUA3VmR161jGSPQGCt2
+LUjSBhfZAgMBAAECggEBAL35IyM+d63nwcC8tKLbbWbBoFDWgkyRN/aAPBbwjEMS
+awT+smXiUPKabavoKc96qA6kgfeHavQiaOy2fjFdsRQfKeFvAADFQD5LNGgeGuXR
+FyJoJxdknWuDy1oSWdT043ltD68S7HBI98zLB++CViNGpF/nc/l0vdS+3uMo9VFq
+4DSEKco/gSVYFmVmk2qblTsOm0breoeegVNQrfUe420ZN87EYIV6E0cRDF7y9DBg
+ynCJKxVVMbhCGs4XHjgu1wm6Je298crfgZqwM9MbxCQq2z9ZIA3OZZSiVUtzZDN+
+/tIDYQ7evoAU3jlyitEZjzoutmP5oruLg0TmsuTM/IECgYEA9h4cXeZZCenkL1sk
+zXMAhwBCgiBEmLDRhgqEKGNomNAn3YLYS56ZOPdZyOFPluj4pncCw8tuFY4FUjag
+A8q1heUGvpivf+0fPtvKYm43VNBWKqY5OTAhRjGMwvPWz4NRQYmM/VaeTx8z+nlN
+HvSzS/WtZ4RrKSAZZnnAf6oEK7MCgYEA4bUakBqZ3Ia/BD+t3Ic2hJuF7xxtNHLQ
+vTvLrGRS5k42TtFI/S7P5lYw1KNqZfFWPcLpM4Y8Gg+whLXXyiTchzlMu3oLLKf8
+xV9ygKv32LMVyKys3bRy6uPXsBGo5j1O5D6dKSWlpP3PdxJSEgG41ESm1/2XP2lm
+Sdav7YNiuEMCgYEA7nIBvZ+cCBTIdHDzWPDgQ+smhHfKvbwhYqHxpEebYOHanatU
+7v57KEvMeuh/eY/Ax1ZqIVOIFe4AjcRKhyHveAeJ30KCVYhgS0AZ9f8eMFegA7YD
+nrpYom8DFyWyql1pbftb014GBlYOv86hvyoIQ8GD/NS4FaH5ueSfcZBAdi0CgYAE
+LzZZ25RkqT7bVk7j2sHg/X0jLLS9ly9VgslI3edHi8Wn+mtO/lIuZAStvIXZc/r0
+VCu8n813cjkEjNZ+Ueagvyg0BZQ1dLvT8OwPhKCorNFHkiP2TEUhI644/mrSMerH
+gn2paKBEicwR2g4ZdVy22rr5ICNwsoSJ8+f5EEmBPwKBgGyEAufpgsQ/DSZMh5JH
+YRVnis44JXmGh5OOVIZY5FLIy2enTxRDIJRInsLtCiZv8jfpKUPuh4dg/H6V4f7H
+/l9NN3NX0FKCKZ16nfsmsTXn0bSVri6DhJ6StFu0i7UAIqJct5Se+GlA4W/3LUTe
+FoqhkNkhzcva2MInJQzC5kem
-----END PRIVATE KEY-----
diff --git a/jstests/libs/smoke.pem.digest.sha1 b/jstests/libs/smoke.pem.digest.sha1
index 9b868f56bd0eb..5ebb3fc37c656 100644
--- a/jstests/libs/smoke.pem.digest.sha1
+++ b/jstests/libs/smoke.pem.digest.sha1
@@ -1 +1 @@
-5AA4C4852BE5F777296840988BC35FC266C80FEE
\ No newline at end of file
+CF3D4706D74DD3AC2CCDE7C69BDB515B470AD5FD
\ No newline at end of file
diff --git a/jstests/libs/smoke.pem.digest.sha256 b/jstests/libs/smoke.pem.digest.sha256
index 25031455d70b5..c0c6b63f6e873 100644
--- a/jstests/libs/smoke.pem.digest.sha256
+++ b/jstests/libs/smoke.pem.digest.sha256
@@ -1 +1 @@
-EFB8DE63DF6DCFB93649EFA5D5F9667444EAAF32FCB612ADCCB95A1F103F814F
\ No newline at end of file
+088E9A717A2F1FC90B9B5F1C6519E8D104EDFEA64968E919630AC4EE8207C4EE
\ No newline at end of file
diff --git a/jstests/libs/splithorizon-ca.pem b/jstests/libs/splithorizon-ca.pem
index 7d401349feee4..0439865cf2bdf 100644
--- a/jstests/libs/splithorizon-ca.pem
+++ b/jstests/libs/splithorizon-ca.pem
@@ -3,53 +3,53 @@
#
# CA for split horizon testing.
-----BEGIN CERTIFICATE-----
-MIIDyjCCArKgAwIBAgIEEz8mbzANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC
+MIIDyjCCArKgAwIBAgIEGtmmGDANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC
VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8w
DQYDVQQLDAZLZXJuZWwxFjAUBgNVBAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMM
-HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ4WhcN
-MjQwNDMwMjE1OTQ4WjCBiDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr
+HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ3WhcN
+MjUwOTEwMTQyODQ3WjCBiDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr
MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8wDQYDVQQLDAZLZXJuZWwxFjAUBgNV
BAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMMHEtlcm5lbCBTcGxpdCBIb3Jpem9u
-IFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCxHkpTenKn
-0n3MxdH+yL+N/879HmJPKlN0HEAQP350VdyvgD28qlZQgwujolp+s31BoSQoEifv
-ECR374y83oJy3ZXB7315BZjA5APShllIdUJt7mpQDKiKbzqGajX3nPo0iSh1zUlv
-/+swtu4IqlkvhVwHdOPONTpf00jxFYvnL51kLRPgGnqj7tXQl2vQvjHGBNZxN9md
-TMXpHU6HdbhLXnCc6PViCdc8dpOT7d6tL5tZp34mrkfyTbhSHE+LzXj+dHR4LaoA
-fwDJne7pX14NfQikcYo6UfVt4Z4ooxojTGJitB1XbwYq8rdCtdpopCP+0RiModJ0
-XmNR7zoO9rbFAgMBAAGjOjA4MAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFOGVM3o0
-/euAzEuIyPIdkRxctV0AMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAGki
-+aOaPfFyOTTS8ojhVPQ7vAHvtKcY7mEO+tOaG8K4PCQElS05DzKmMaMPArWP/Cn9
-KxZtBkQbXriprc/NPzVY1I8IAlJJsiYZjdA+lhV6HXyi7hWsQHMmbHkf2jyJ8jBf
-xR+l8MxL/SJCSP5hmOce7vAC+nbmntx/uXcrMz4CDYNbKMqdLETz4tGiX3Dlv/8W
-0kEIPx1pAZLd/gf4BoOXiW8AaISyTkgloxq/jHCgEueFFGUYVpRovNf/N3jYMJDL
-8tPBDUrGFPb7hCSHkHrGsriGVWpVuSsQxdzZO9/rpA2khgqsp8Z+p2X4QvRrwEUW
-sKJmv+i5WkUKmMNowh8=
+IFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCw8c5axdKz
+ca7Ffj3FSFRw10+kCyzwnEiu/PAsC8YbTLGklnWC1wJtz60vHmX1+ns225omKgA1
+C1g4OvpLGt8T4uKy2igF/J01tthvXUE1wkCTU1ZHn+Ski6P6Nsf7AIEDKZKVefbo
+BTp5vhCAM1sQv1sAuTKUZNn0wJPZdQygmmzmGqbF5+aPpbjHrhxQvDbFAMFANVRy
+s9iTPFNjknher/OwnE/zakphlvIrrbnJraQTmbSMQq/mrPnL0e83donhtN1onVUt
+TfgDmVqH7wmZUpjrbF1fB0OQcdzX7T2IwbJnAJC7n1W47jXPaxsQ3uKS89yFQSJN
+froq0dTnWj2JAgMBAAGjOjA4MAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFH+VIX2V
+L+Jxin+FQ61yAH+U66phMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAEPF
+xhPOMsYu/yDEUkBIxXDyv0mSLf0WH8HBAqSyiRHvkW7h5lSx+/lWu0FBqSAFiTB9
+uaPoImah0cT1H576gIv/xURjw1K7ZvHGcEp34360AJeHWA8OJ4Py7sBadGDFP5Gt
+zmhD+5mluIqgKzKSIr5yxGSg+PaB8R4VGjdxI4otuwkxCslB4C7u7ZhTIBosXdpS
+sD0BaHa+cg1Quxv9L0hGIlvEC3v/u7V5RTAW80atgFLPAfdYdNzepEDLyuC6y/wd
+jJh9RcljWvz82Vh/+I4CJEX+qd2E0gaFWLpbqMoquXr8YWOcplxXWzO0wHvHJslE
+OdYjkLZIIJuUhqSfigk=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCxHkpTenKn0n3M
-xdH+yL+N/879HmJPKlN0HEAQP350VdyvgD28qlZQgwujolp+s31BoSQoEifvECR3
-74y83oJy3ZXB7315BZjA5APShllIdUJt7mpQDKiKbzqGajX3nPo0iSh1zUlv/+sw
-tu4IqlkvhVwHdOPONTpf00jxFYvnL51kLRPgGnqj7tXQl2vQvjHGBNZxN9mdTMXp
-HU6HdbhLXnCc6PViCdc8dpOT7d6tL5tZp34mrkfyTbhSHE+LzXj+dHR4LaoAfwDJ
-ne7pX14NfQikcYo6UfVt4Z4ooxojTGJitB1XbwYq8rdCtdpopCP+0RiModJ0XmNR
-7zoO9rbFAgMBAAECggEAcMPFNILIDrr/5uinexxagNC1+wdmNdrPySPXUcDrBKxJ
-1d7C76RPUfEs7uCF1xb1j1xFxdFRZW4UmduE8haEXRdgqlmMvAlpKJ3DJTzuCSjG
-w3DAdPiqMYEBdOSYOW7TCbGtddref5UjKx+8Sv4RzCSO5BxykS3oXWwKi5tZODVU
-TYH/O7mIA+fQnLv2QCqLQCe7oFzhD7cyLzbpbK0SS3UE38GwAuExCHX810BR6wHi
-nCJkHxEFdFagJKeMBAxNLNdqUG3ISL7/r+FrouSQHZtqAj/Ch/XMcIwEJK85d5Ib
-S0qwFaBXuWSk6/V3hb4gkkL1OqJQ1u2n+yh8hjvMnQKBgQDVxpZH5bgMSxwBvWIe
-BxaRZjucUVcptkNrmW+B6yC/rI7Y8TIY3YIDhAO6Tg4oTo1mANxnmY0EugwptF4+
-blbLt/vOthd81PfYTDziN/ZN7ChabQAnsC8QUwyEQ+cofl3eIu1tu73zKhdeC5Hg
-THYipR/UDjOs6fkwMD1CWpvCqwKBgQDUGiX32qXAF63GA/O6ixAGKAV6ZgO8chzI
-w7ei9CnVUfHOv+1SGwGj14Y8GcWE70VgNv/dArbVpS8E4XFFnkfcwA2W+plrt6DZ
-wTlLis+yM8COQM6VSYv8Zhrf+7JdmRrxpa3j9a7rJjtC7YmrIK3aYDeft7gpTNRe
-J32DBFDsTwKBgQCOkk8PzEkfCci38FJLrHaEqiX/btAu5Xu5ey8++k1xB+iNDu9W
-XgSjy5ug2QXgI+NxsAlOnr3J9Tq/ZaelA3mnjCDID/FCM9bHzrCcPq5p8aJIDIIZ
-9gqtXHXwkEjOXNjFmY23rYpVbjD/a7/yU4xGNtIvXvlfzPuAA+wXIM7Y2QKBgGHy
-5V/NmEfaZ4SPZKnb+H1vVABPRiBrbkGEqLRXH06E9i8tUPJeyGYabMIqgJ3ARYCG
-RaiRzU9iZhFR7xZgXv1hr6Tue5VUCrMk2Um6g+nenmjTItOsUDoyCO0w9hDlWJ0J
-jwE7/xhW4n0o5y+g+shjMKzQkfp4oYYNpJexkZFfAoGAaGyfshedvZhR1jJMwfxy
-T2K3HoIx9f0HpXbSvcTU60ibivfN8j3CKyb21G1TDy0vVXzmmAn/xE1d9+vIoRhh
-zCadDje2aMNoWgYEjxCBm1M6eH1+JKJFDuXV9vxuMjZtdOsw9NGISCa5lVsz2Z/i
-/SF0aY1nqtIKkcfGe2NRaKY=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCw8c5axdKzca7F
+fj3FSFRw10+kCyzwnEiu/PAsC8YbTLGklnWC1wJtz60vHmX1+ns225omKgA1C1g4
+OvpLGt8T4uKy2igF/J01tthvXUE1wkCTU1ZHn+Ski6P6Nsf7AIEDKZKVefboBTp5
+vhCAM1sQv1sAuTKUZNn0wJPZdQygmmzmGqbF5+aPpbjHrhxQvDbFAMFANVRys9iT
+PFNjknher/OwnE/zakphlvIrrbnJraQTmbSMQq/mrPnL0e83donhtN1onVUtTfgD
+mVqH7wmZUpjrbF1fB0OQcdzX7T2IwbJnAJC7n1W47jXPaxsQ3uKS89yFQSJNfroq
+0dTnWj2JAgMBAAECggEBAJJwLFWXbp8vsHKl3b1N9QRGTRT4YsLmtIiNsL7jJ5sk
+R8hs9OfJxarXuv5J7Bw8pohqChMXo4BC2UoAFXDe1kwA85kNTx5VSE5R+qF/zh4N
+m5/R0pAIVhOPta+4cpzad+sEFto6TAsNeK2UjNdsqSsdEtVwb9m79V3HmxTW+BJZ
+GiUL5y5agp/gGHvDYGwUvHSMV4ve+GSc1bMsjmkIYmW/QBe0SK8zhxViNmcQm3DK
+4fBxrTGsoszdKZajxgVUjxi5Gp7dJCr7dxKkVhBX0Ph5UU7eouLGKT+4y45xsclz
+OCo0pOut10VXDCLgV6vg731JCL8cc4AkB7tKYOFm61kCgYEA5JAhYYsD8rmw2VDT
+UGRnVn3EBWz+i1QwFHWSnlQYU0MO34PRua65+ixsjoN9LVjdeWdYSIS2xsyvbdSw
+uJIU09O/JHAv7YlIoWKa3LdAM5qkSkU2OFaoGc3yaidXoZT8oHAFoQgV1rZaZnE2
+Jfg5yjBzyVbws84yy/oeOslrEr8CgYEAxi9pXi2tEuj3DURonGfH4StDXCt3CDx2
+Lj0pnprc0ppQYzN7gd8FL5auza39iuU2+RHK7MhzgZh8JI1szgwHNYPfVZGUvtE/
+wIjLBZ0wYfCMS9+Am4jEmprGTp+ks9Rn4axOllKzAhsBm5Jejm1YlonJXbBtg1QJ
+LoHf4jhA6bcCgYAlf3/6gNQk/lIFVT1V8yMBDaEbQsaAFU/OC3wCfcl/34CRKw89
+Z20bni4xM2FWDz5GrK3PN/bEptAhNMPJhC3mktbJI565WcyQrZy6mVXvIW/Sv4gl
+thHeKeubftjhqY/Iwpgp0ynCECjEpc48bH0OruRQI/NZjHrI/D/5ZRIsAQKBgFRL
+aDLBlMLdSe9fsGsODUstnpn+dsOPC+lA5Insbo45SoXXKOolVMxX+APNxHCma8pm
+kfxCijaOCAd4C9Zb8VkGvSBlKnI7CDW5OTkrEaAms1W2O7pr4surlztr1Z8Tchff
+tRFlHXSAzh9Ak21a/voQoxBxcTMRtFIOtY4/xQRjAoGAT3n5kKdolTEHLoYk5Nq4
+3qRODRIm5uFHM5OKVGtgWGWhSDiUzbBw+DRL8aceWBI28Vwi5yw7pGZXWWLxmcdh
+JHsjqR5f3HqYjUaHGPqo600S13i+cXuiR63GnMLhUApTXqv1fGx9OShIPm9imX8o
+9hIG/rq5Asx02sY8apm13uY=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/splithorizon-ca.pem.digest.sha1 b/jstests/libs/splithorizon-ca.pem.digest.sha1
index 293283125621d..a28ace5dc86ff 100644
--- a/jstests/libs/splithorizon-ca.pem.digest.sha1
+++ b/jstests/libs/splithorizon-ca.pem.digest.sha1
@@ -1 +1 @@
-88F3B334A89B06D6D5C7F6336F512B3ADE14177B
\ No newline at end of file
+8D493ABC737EF81BFACC721ED7869C62E28BE08C
\ No newline at end of file
diff --git a/jstests/libs/splithorizon-ca.pem.digest.sha256 b/jstests/libs/splithorizon-ca.pem.digest.sha256
index 5bcd94c39bab1..7e7927cebd1f3 100644
--- a/jstests/libs/splithorizon-ca.pem.digest.sha256
+++ b/jstests/libs/splithorizon-ca.pem.digest.sha256
@@ -1 +1 @@
-30C84F4D878690807FEB5F01C6AA72F3CD17CCC416FC3BF71A4D6BE697276867
\ No newline at end of file
+36BF3E83C9B009BD38DCA3389FF19C1AFB1D7090CA8158EECF9F8F8D8E113171
\ No newline at end of file
diff --git a/jstests/libs/splithorizon-server.pem b/jstests/libs/splithorizon-server.pem
index b647aabe66249..cc8a6c6abb267 100644
--- a/jstests/libs/splithorizon-server.pem
+++ b/jstests/libs/splithorizon-server.pem
@@ -3,53 +3,53 @@
#
# Server certificate for split horizon testing.
-----BEGIN CERTIFICATE-----
-MIID5DCCAsygAwIBAgIESEoCtDANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC
+MIID5DCCAsygAwIBAgIERACtBTANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC
VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8w
DQYDVQQLDAZLZXJuZWwxFjAUBgNVBAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMM
-HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ4WhcN
-MjQwNDMwMjE1OTQ4WjCBgTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr
+HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ3WhcN
+MjUwOTEwMTQyODQ3WjCBgTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr
MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8wDQYDVQQLDAZLZXJuZWwxJTAjBgNV
BAoMHE1vbmdvREIsIEluYy4gKFNwbGl0aG9yaXpvbikxDzANBgNVBAMMBnNlcnZl
-cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL5lcosVHzMYAh6GO4SO
-LEWuT81d9cCszfxSDH8yTLnrEXsiMdDzTM9R/McZTpVQGHE74R8h4wTqf7roCrSK
-gl0MokDUYhr/X8Gbr22UxfmNYyVsN1m55NoUkW/igBkqzsBQEzcfkdpRr6Uv4eu2
-uWcTLb4fJAQBzLtl0SHBgPMx2PNQaudscWKOezAV9zSygPwhqKUWsYzuFPGZPyJp
-QU1cSfFv7PgaH8UGzK3ioXOd37HY0QANBaowO2XocAyK91Htk86mlG1ICEqkPHRG
-u4mbsFDK2bf30Up9BuICQYLIIVe+hpSefqA/IbXqv+1x49hEMGK1ebOt15knuCwc
-3RkCAwEAAaNbMFkwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDgGA1Ud
+cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKDnlnCmQwRB+dWZlS87
+pXX6ixb1ipKbyZXxwbUI8l8HWEQKojZdJIvjsmVVTyo5pyW7DeFcmBr7xurF0Scf
+aRv2PnPLZrFlS57fI+ZFlaLK5vIq559MessXFmL+S86aHdNcGBj+O122oZXeuokp
+v0renwoQ7hWC8DalSZ6yG+JvnMxR8lfm1EGpKTqMqVtzYfNJutvO7Xh0ZjnaY7oj
+Nj6rAPYLd4NMbBN3bm0XonnUfnsxGhtswDM04BPi9vP7mdaJofq6cAx/GY1UsBKF
+6wuYNv3HN7Me9JRsX1u+g/1rogG2ti4/tW5JUBjIfBz6+Q2r+P4bzqlt8XvT0GUK
+XmECAwEAAaNbMFkwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDgGA1Ud
EQQxMC+CCWxvY2FsaG9zdIINc3BsaXRob3Jpem9uMYINc3BsaXRob3Jpem9uMocE
-fwAAATANBgkqhkiG9w0BAQsFAAOCAQEARDK4wkeH/LQ8apO9Q3cAnEOwzBsbz270
-iT+4QQOrZ660F5Ex1SaiZd50brje7UyZmx6YTybMtCxwUAoyRvM3PXvdRUNeMTb7
-J6VQXMGDmP4ERYB6Hzbc/VrOOjxe5MgbGOgfoKlc6HDTcBY9unvh4M3GJrTudVnv
-wfTzwQR4bcFa7ASEohQbSXmw3qPeyD6od3q/A+ZTWvzEdKGKs4tHDoO0+KZROoNC
-Kd9oW+bobAeHmfePGxjv76epqFO/7KQeUd/niY+98hvBCaHQQJT4IjJFmBXIo4JB
-pZrmKAKRS0iD5Wy2Q4r51YSq1dy7YYiJI/FoO6pbxbGZLaAWBKJHUg==
+fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAjRVgI+Y6kV6wLv3s7osLiT6VJ11YOlF1
+JWH9uxRXp9R9ke3aadCV6FQ5F+1C9I6HvT6lCT3Ltlsk5QzOfZ4/GnC09BDhPQKd
+aiiAXx04eZWvXGjC+ge5NfNB7ZOIjuf6qH6859Hr/inGC9rXt4+GQkEps7JiIHax
+iATIDi0lkW3VxWS/jNtIDZW7+//I7Cb4Tp6xB/rZFMiKfoKpJgZ7dot5R78/961u
+Upmzqqn23ULzZ76hGLL5El762BdxhU8IKk+uiqGLmiMRzLO1T+mlQtiqLtscf3ej
+5erWdW4WCiJ7csv0+3EYzxnHvB87N1h/EXnsm5Vp+KmFG/skQ7hMbg==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+ZXKLFR8zGAIe
-hjuEjixFrk/NXfXArM38Ugx/Mky56xF7IjHQ80zPUfzHGU6VUBhxO+EfIeME6n+6
-6Aq0ioJdDKJA1GIa/1/Bm69tlMX5jWMlbDdZueTaFJFv4oAZKs7AUBM3H5HaUa+l
-L+HrtrlnEy2+HyQEAcy7ZdEhwYDzMdjzUGrnbHFijnswFfc0soD8IailFrGM7hTx
-mT8iaUFNXEnxb+z4Gh/FBsyt4qFznd+x2NEADQWqMDtl6HAMivdR7ZPOppRtSAhK
-pDx0RruJm7BQytm399FKfQbiAkGCyCFXvoaUnn6gPyG16r/tcePYRDBitXmzrdeZ
-J7gsHN0ZAgMBAAECggEBAIW2pWz8Fn9Bmytoxh/f4UPGmZD3LgJ02e0cil5CKxRe
-/FXnu+itFgJ75/TD1yaRq7jKft3oZJEtRysyj2If7FIA72psPcIMG2nTNq7Uzvzl
-yP2dNHo1TX3C+CkFf0UthSoWkogiCcKzn5F5QUbTev3iwDtHj3mo8emiJe95AGDK
-JsBCv//qGcsyC6/CkS02734jW5fnP+On1qRxwVNMDbJPXlauJ13HFCShGFqUdXsh
-DxtmpJNcn3x33lkDvQ8HOco3i7M2vXZdBVZUIBpCUK4jz6WqchFoG3iEpAH6rBLO
-kGRkjx9kZTWd3w1gpV/Z3sd+DxS3ic8bzuo3H/BuRQkCgYEA+ewTPGOdCMr0geGO
-fS/6yVn6Orv3ILjnn/ZyFHzoIs5F6wkc4vf1/EAQP2GJBJqZlQXjv5RSEVElDn2w
-KCM12IsGlZKDoCMbDUba7yF1Y32OwvCeWdoxE/LoymwPZtoj9TWAbqUxD9WKYvaE
-c18hYR7rOT7pNXfS2JLDV1u57j8CgYEAwwbJIhXkVa9f6XppnT0a9ojUqswVp8AD
-r3WJ5QlvoF5rxGdqPmgjaobE+i7mYJcrgMQQkSumBnaGtzVunF9ydg9kFah7uJ/z
-iX8ZuivQbTEJGgrDGYOMIfqRxloq+aR0f66RgUrWV11CuuFw8M/zf4YYCqtl1/V5
-oFu8gLh1DqcCgYAoPkxmp/+sX8n5dBGWtPgkHhn+BTBpE30ws+VwySA0IpT9oM0G
-+zif8szq99CxbheW+IjQ25hPc4qNB84q2GieQty/jwUk7yI3BBbS9MdLSveHCmnl
-PA41ESNNHRNp02yA2qmgp3b7/r22680uKr5cPjdQU4IH+xB3kUJlOb75zwKBgBtD
-tZaoTNWVwOjZDi7HPh+9HU4LXfOnqlw7KF32pb5btOEuO/IhiOH5mUwIhnh2acSE
-pHRHD9zTcR3nwGDoDw5mNG26siEra1aLiEM4oNjDBCy52HCWrV6KsxpxvJRN42N+
-OiVDGxf3l76cibeAut/XFUFYmNJKJQjACmhMYUD9AoGAEgjKAngD8M+XRjNOR9Q8
-3jemZKTSxMiTZ23Jhm3kr1t0OUHObSXrjI/pRU/DyiAkzfoFqp+gBU7EpJJWYjCu
-p0UrW/vR1dj/LHIRtBHNYGwu+CLuZYJmzy1qMSxKthL7yERWVG96lZM4tUTp8UHi
-91CJDlpFxa3JCH7VnApiUIk=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCg55ZwpkMEQfnV
+mZUvO6V1+osW9YqSm8mV8cG1CPJfB1hECqI2XSSL47JlVU8qOacluw3hXJga+8bq
+xdEnH2kb9j5zy2axZUue3yPmRZWiyubyKuefTHrLFxZi/kvOmh3TXBgY/jtdtqGV
+3rqJKb9K3p8KEO4VgvA2pUmeshvib5zMUfJX5tRBqSk6jKlbc2HzSbrbzu14dGY5
+2mO6IzY+qwD2C3eDTGwTd25tF6J51H57MRobbMAzNOAT4vbz+5nWiaH6unAMfxmN
+VLAShesLmDb9xzezHvSUbF9bvoP9a6IBtrYuP7VuSVAYyHwc+vkNq/j+G86pbfF7
+09BlCl5hAgMBAAECggEBAJlkTcG0bp0HfHHJyTPPC+zN3X0UtXcQdvuwDjdUgSJg
+sR+kd4GWB/ooJnB+BEs/zP2mNhRKnmLS9Y3o6Xv/cMeMV3syRAMHizC4jtO+MGmS
+PtpMVqVBVqp3pwrWXFRoDdjpqbth2hx7t81VGMUb/FYWps0E2MO/INsndWiJaRXS
+SAgvlbJSyELkp8BtG2d/xKPRwmGFO7IHwqpn9dCTLgymlV1V6eTwntqg9pUjxbRg
+JnqeTuhI1kMvJAMOoMhTOqIG2TVriRr/goGsI6dKfkdxV6xGG+D68GoMIcCdYAu/
+mxxw62r+Ujwej8dhjnBt+eyjl2eUrKDkC6dTlfeFg8ECgYEA0wpKw/X4cm/32nPX
+B7uXm4IjereCXgRBQz0bsb7G1M3MekYY6weJd11JwwAVjH9AVO4M+OyjqtS4aKAR
+X3o6Mz8Rq5fM/ddhNP80z0BWbzpkA4jJVxlTT8dajMPf0XB6qQnXsNQmSVnfxwdm
+zJAglg44pf0999+NAmA+LSYal6kCgYEAwy8AumQCwKUcsD9VH5mPVvVsbHSHpDX3
+BfwEX1ludM5xAgOw8U+5OJgSFqQKUo5HMwzgsd0DN3ftDIbyhibLDf1+nJrWzReV
+YM6oWkHSTMYjWgD62DVtgpUzs58IJjTQQ0B0TUi3/7gmRWoHoJpBGw4tzTgTeG7W
+Y46x4GJi4/kCgYBCn0AlBrRs4/35n5IlZcohlH7A0ce9CFjV8ieZACHisik3/IMH
+RzTVUsTjY1ZqKQ3VAeVke8fbORYSKV3ypzJSVTmt0tkq1WBhi6NPPf9LU8KY/wiY
+j20mhUeHopo4kuqWDH5j20VO6KllOmfwchtnY4vskrqKUq9ALcPTGvFQKQKBgFId
+kDzanp3V2T/9JR7qR/fZwQfP7tETMx39bQmAYowZ3klurB6Z398De3izAvTAjwvX
+OyEZTqUje0Nt8tKlt3/nSkEwk3Ytmdbkmsd+Ma/DHFRdXSthLvVlOB81pQQN7CCf
+GU76bQ7UtqU1eogC3ak4SeSw1uAwIVo5SnSQb/JhAoGBALyq7FOHaiNFngQZs28D
+sr9J+H/e43DsH82XGg2XYU9SMfsNrrXoS1MwdUGIPVV9fKRdoFUcsFlcNP2zqZyg
+RyBuOC7/uhi2TAsggXTyqcPVTu3td2BRPQ0LarjHD4eSmgKFcC2reqtabjJ5pRGD
+k9i21Ce0oXr0kdaRGi63C7cP
-----END PRIVATE KEY-----
diff --git a/jstests/libs/splithorizon-server.pem.digest.sha1 b/jstests/libs/splithorizon-server.pem.digest.sha1
index 7758f1bd3f0a9..980ea0a2634fb 100644
--- a/jstests/libs/splithorizon-server.pem.digest.sha1
+++ b/jstests/libs/splithorizon-server.pem.digest.sha1
@@ -1 +1 @@
-E877FCA8158A566902AC60584781143547F1470B
\ No newline at end of file
+F5C2A872E64BE5CD7D88816BEB82BECC1E086DAC
\ No newline at end of file
diff --git a/jstests/libs/splithorizon-server.pem.digest.sha256 b/jstests/libs/splithorizon-server.pem.digest.sha256
index bab0025dbdc7a..b89ddf38ab782 100644
--- a/jstests/libs/splithorizon-server.pem.digest.sha256
+++ b/jstests/libs/splithorizon-server.pem.digest.sha256
@@ -1 +1 @@
-17EACF5F5659067ACC1CFB74075E00741D8638E6F623609F834E5B0E84FE3E1B
\ No newline at end of file
+E45A9387367EF18299199CC2AA9ACCD74AF3CA279CFEB60BD9F3D6C7BAC34A47
\ No newline at end of file
diff --git a/jstests/libs/storage_engine_utils.js b/jstests/libs/storage_engine_utils.js
index 7c6f2d1309ea3..082951572e60f 100644
--- a/jstests/libs/storage_engine_utils.js
+++ b/jstests/libs/storage_engine_utils.js
@@ -9,4 +9,4 @@ function storageEngineIsWiredTiger() {
// We assume that WiredTiger is the default storage engine, if the storage engine is
// unspecified in the test options.
return !jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger";
-}
\ No newline at end of file
+}
diff --git a/jstests/libs/telemetry_utils.js b/jstests/libs/telemetry_utils.js
deleted file mode 100644
index bb42973301ac7..0000000000000
--- a/jstests/libs/telemetry_utils.js
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Utility for checking that the aggregated telemetry metrics are logical (follows sum >= max >=
- * min, and sum = max = min if only one execution).
- */
-function verifyMetrics(batch) {
- batch.forEach(element => {
- if (element.metrics.execCount === 1) {
- for (const [metricName, summaryValues] of Object.entries(element.metrics)) {
- // Skip over fields that aren't aggregated metrics with sum/min/max (execCount,
- // lastExecutionMicros).
- if (summaryValues.sum === undefined) {
- continue;
- }
- const debugInfo = {[metricName]: summaryValues};
- // If there has only been one execution, all metrics should have min, max, and sum
- // equal to each other.
- assert.eq(summaryValues.sum, summaryValues.min, debugInfo);
- assert.eq(summaryValues.sum, summaryValues.max, debugInfo);
- assert.eq(summaryValues.min, summaryValues.max, debugInfo);
- }
- } else {
- for (const [metricName, summaryValues] of Object.entries(element.metrics)) {
- // Skip over fields that aren't aggregated metrics with sum/min/max (execCount,
- // lastExecutionMicros).
- if (summaryValues.sum === undefined) {
- continue;
- }
- const debugInfo = {[metricName]: summaryValues};
- assert.gte(summaryValues.sum, summaryValues.min, debugInfo);
- assert.gte(summaryValues.sum, summaryValues.max, debugInfo);
- assert.lte(summaryValues.min, summaryValues.max, debugInfo);
- }
- }
- });
-}
-
-/**
- *
- * Collect telemetry from a given collection. Only include query shapes generated by the shell that
- * is running tests.
- *
- */
-function getTelemetry(conn) {
- const kApplicationName = "MongoDB Shell";
- const result = conn.adminCommand({
- aggregate: 1,
- pipeline: [
- {$telemetry: {}},
- // Sort on telemetry key so entries are in a deterministic order.
- {$sort: {key: 1}},
- {$match: {"key.applicationName": kApplicationName}}
- ],
- cursor: {}
- });
- return result.cursor.firstBatch;
-}
-
-function getTelemetryRedacted(conn) {
- const kApplicationName = "dXRuJCwctavU";
- const result = conn.adminCommand({
- aggregate: 1,
- pipeline: [
- {$telemetry: {redactIdentifiers: true}},
- // Filter out agg queries, including $telemetry.
- {$match: {"key.find": {$exists: true}, "key.applicationName": kApplicationName}},
- // Sort on telemetry key so entries are in a deterministic order.
- {$sort: {key: 1}},
- ],
- cursor: {}
- });
- assert.commandWorked(result);
- return result.cursor.firstBatch;
-}
diff --git a/jstests/libs/tenant_migration_donor.pem b/jstests/libs/tenant_migration_donor.pem
index dd67976e7578c..3ebd08bc8c2c7 100644
--- a/jstests/libs/tenant_migration_donor.pem
+++ b/jstests/libs/tenant_migration_donor.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration donor.
-----BEGIN CERTIFICATE-----
-MIID4zCCAsugAwIBAgIEcGOmLTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID4zCCAsugAwIBAgIEfyfW3DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBrMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBrMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f
-ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDQEvdnV/rKKmAX
-YmOJz6seF7z0JL4fO2ImmMckKQUYGuWR/Oa/gXuQqb7QHaPPnlKHnKkxrRAotXG2
-2c2a2bWBEQOODl5qsLL8JN8vvJz9iWu6sTh6fnkO630TJKobtlaApgqJOa/H6TPs
-8re6+n4db8yqYC2d+ue/rLI4ruJFRM5VbevgutqsSlVlFYf0gTnsTck6pexXw0HK
-Zamw0yMAEg/F0VfHjcVgpkZIzZKpX5F+v1FmZqjTdAnjnwNdFdwHLlmrnvNfbHkN
-dCbSk1ETole7NTu2LBH9+fxvqzjWmyoA5Srlvt3cHN97XsHwKLC+4ikvRSh6pUwg
-ha4sXLPVAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l
-BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFJZwHiyyArO1ExT8WAuqUcSYxx7zMDQG
+ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCUWLVdp1mrlWCE
+ujgB0Rjddra6tc8PfjnZUW8qlmOmqQTuPGtkVJBrMsHnmT554XQ+6UsdWylit38S
+7OJOPXR2n8jc67AnrlfnvHh+p/6Q97oiH0+Bcg+9Kl2V+zeNDaNd95JZF+nqlb6W
+WGVPadIILJjD5wWSXqi0C2spN6wyuq0kYFgzCnBJp6p2haIjQzdlstmTOJP9/FPx
+pWWTGBz540Bfyjv4LYPuXmIFf1eVh+xkh+VyONfG/ZJyc9MsINbFi3j/Low6sEgc
+ZoqTTopBB0XKJzQ3NtM63zVrjmizfWFW3a+KlFb9SYfdZUkah+wCQEgwZ7gKo13e
+XNf+3lFjAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l
+BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFJBZSJC8XIqS/3E+1ULuWywuw+/8MDQG
CysGAQQBgo4pAgEBBCUxIzAhDBh0ZW5hbnRNaWdyYXRpb25Eb25vclJvbGUMBWFk
-bWluMA0GCSqGSIb3DQEBCwUAA4IBAQAMuCkuvIDgMdA6J+DEGxo4Y8PFrcAuzApp
-rtJ9O5iyYfvCFQe9fec0M8aIZMSwnIjpLpn7W+ClK+aXQEetii6uDArmPl8Ql7mu
-y4Sv47iI+9SGTHpjU0sKDQXXAwSyLrOgroRGg0AugpZ3MjXPnEQZqbksgrZjWINi
-uWT/h9PrUBVsXiFAY5zN0OLPPUc72CFRKSbfJdHbn4XwMs9NFQpUa1BPxJnXBdXq
-LlHKrxn/FvJXiRaBcekKpaGWdGYXL5ar5qv3/ZxdXnO12JFGj1mB+biMNpL5I5cT
-/vnXg3S54v4k+EG/0oqo9zbkOp1FEU/n0FKUCg67AR8k0htvDiwV
+bWluMA0GCSqGSIb3DQEBCwUAA4IBAQAEMnjAAZApKG33zdFMX122KKEQXHJLqe5Q
+EGhcUbP5gZWGDIM0Enli9tfJnS/oRVfe7y3q9L9jx4ym2M3cIB4tCa1KACZ65mgV
+MsSS8MgiN5t8gNsxYN5WWeZNN/yoceFbyspJ9s29WNTweoA/Tv+JI0xSCj3HSuMb
+J/nbx6Mhu5+ZD+y68SeqlL6IOuv/XNyPBmQD9zdiAh1mrn9M/zNZ4pnpu32gdcYE
+szhLvI7ML8BszuDy0XPXkpWsaDS7kPH43qYqtYbXbczUgyRWcraXqhsHJWKhludH
+gcxAUCcu1/e4oTzOoY4dAZF2Xx3cwB9aFeWqezSKc2SGHgQTQIB+
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDQEvdnV/rKKmAX
-YmOJz6seF7z0JL4fO2ImmMckKQUYGuWR/Oa/gXuQqb7QHaPPnlKHnKkxrRAotXG2
-2c2a2bWBEQOODl5qsLL8JN8vvJz9iWu6sTh6fnkO630TJKobtlaApgqJOa/H6TPs
-8re6+n4db8yqYC2d+ue/rLI4ruJFRM5VbevgutqsSlVlFYf0gTnsTck6pexXw0HK
-Zamw0yMAEg/F0VfHjcVgpkZIzZKpX5F+v1FmZqjTdAnjnwNdFdwHLlmrnvNfbHkN
-dCbSk1ETole7NTu2LBH9+fxvqzjWmyoA5Srlvt3cHN97XsHwKLC+4ikvRSh6pUwg
-ha4sXLPVAgMBAAECggEBAJZuL+nKJKmGi2Q8oMQgnJEsDlLgQYfo8eP/rnHJKkKs
-J28tTVIE9eW3oxlpZGYVC4u5ymT8vuL/kw/kVY5veZavS0enwcEWsMA0lBs6HLH8
-aSNXPwLobvNHc2ykpEtWvyFRaeqeByTbx8u2CvkmYok7q9c9o51EqcncvBjY7wOu
-Fch/Rq2P0FTidpyk4APe4FaENDn8MKYCIKLNyLthkHqXQ8mzleJAlof+4lXwEQ9k
-HehpklLqgoQPyCyF0TDwWgqMs72tBo7gSZQ7G6tV1/iwrW/29YL5L90uZH8VqWyh
-Eoo2NoNifhPgjH0Wt7QcNiK+ofuqypEaf0akPguAW0ECgYEA9Kr1tVg+VTKRj0LM
-hXRGq96FmJ0mtwtCtoBG7O91llskN3k/wcHjNsh+OqNSrU0S5SoSJiKjDm9j9CL+
-Weo+oX432zB0Y37mHYGo2tn97UeZw7sCLxdpI7E8MNDRyUw0fECfwzb6qk2eeueS
-AaE2D9mwgpPTL4Oh3obsQlVhHgUCgYEA2bYc3Qcb0wVmx9XV23XKa3IIKxeilWw2
-rnVFifHNQ0VCZmSdj7OUx1VnyHM8PLxMme+h1bziXZDsIzGIyZ0xJqsud7muKtHX
-HSpfyGZi2pRMRvNM34Ql2ZwSiFQvLyKbSXKzkYaYUjCPG5nRUV/uDQJ+544fYuFz
-GzVw35Q7V5ECgYAx4VzAgOIMWofkbhjmKEN0LKSN7ZQEA06xiEttaJBPe/tRN+3w
-oYeiFpPtfniPjkQI5l/W2H7npXv6PNwgFqp4IBRhImrIEgn2AAYdwA2Nv2gwyEJ0
-uVAdD7gWt6vdXyTgJAiuMto9uB9ULTvOC0DLUHU02dBXQn3QsS8fguqOxQKBgFbF
-4CE0eSGVcI7Lqu5Kt4A1tg2+4N1fgTfto+bESoS6cSmwhqBE+lKstq3NSEAI6Rsd
-yMoQ+8RBQ+0PDC8XiQSZi+7KQiHs+ykzv3N3Y4UtQG2zAXvBBvkB3N0beKb3yx7u
-KaYiGEoB8rgUibo+WvoyeXQ/UEEwDJnWpgNm6HQRAoGAVchu4R+WGg6ItFP6kgNZ
-AWh8SunlDjBr7Bc7TEXyQm6JYKH2KFKY1YxSpKNOg7oCddIHqjm7PkS+v9VpWOIA
-T/43Ak+4x+Heb/JiiQjyT7hd9z50KLqdneDjGN5+xrRRw+nhca0qMG0+ldVrgkwk
-bw+41eqcCw800imGJNg/aH0=
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCUWLVdp1mrlWCE
+ujgB0Rjddra6tc8PfjnZUW8qlmOmqQTuPGtkVJBrMsHnmT554XQ+6UsdWylit38S
+7OJOPXR2n8jc67AnrlfnvHh+p/6Q97oiH0+Bcg+9Kl2V+zeNDaNd95JZF+nqlb6W
+WGVPadIILJjD5wWSXqi0C2spN6wyuq0kYFgzCnBJp6p2haIjQzdlstmTOJP9/FPx
+pWWTGBz540Bfyjv4LYPuXmIFf1eVh+xkh+VyONfG/ZJyc9MsINbFi3j/Low6sEgc
+ZoqTTopBB0XKJzQ3NtM63zVrjmizfWFW3a+KlFb9SYfdZUkah+wCQEgwZ7gKo13e
+XNf+3lFjAgMBAAECggEAFMsCUOg+F2DOsIvxgb+vPot/PVhyd3d34FAbxtphiWCz
+gjkRzHRV8dChgTbnnOnPA2przueSO3YA58QwTEss4duvqdeXT1iOlbrDIO2nLuaq
+PlIpMLmuHxOrHnSh6rF4rpke+EHz1Z+7oRMu0BzDBcZKe1sMWuHNv77B/HsDNfDh
+5mAB3bG3NYu73lfLoeA1x+r4eKBGQkcz98Uil+Nhdn1Tvef5nOa0jLHolHl5mzyW
+HXCKnJGvHbklcMll7R0jTJ/GPnhHVSBVpZVD9AsyGZc7a0fp6Cb5jmxUd3YkqWvj
+juCf7MYJHUEIWAPgSJ9CXkh2lhKoqWWNCWFA1tILkQKBgQDDz6Cjmkbl1k9CRipQ
+76z58SF7Aq6w7TQL/Wl1FSRb1tGgM7T+F/KXQRqEVF3kNPC9DuvcAEy4RwlNQ/Gb
+e2ZHS+ewJD8prjtCcKwr/U7hmgeYTvmvPWZICAe8aA0k1tRHmhuXftvBcozWjvI4
+DSRVDrHZCe7EGmd5ONXcoLNnXQKBgQDB8hjSeuaEuvDrcoagIl5YbbD2H+3avs7o
+K7eqWKozH2yGuMViTbBSad3GqW/7WxE2E+7cUqVKRECnOZOy91+kU9H3Y4gqYeIi
+Nd/ktVi49huvauvUAlFcLGEeR5k6gqFFIpWmMR8ksb+2rwkaLeV8J5dweRiB5iO/
+OJvu0zDPvwKBgFoWhInAyS2lVAHh1kY2fxbfYc/+g/DSsUdy57n+aCP4yakzIOpm
+ii4cvd50El6UHM5etxUY7jM6O4VY5SfrtjrHKWlYw9ahWpJO1GfIuyoAe84PR8Dg
+NtadGzILjUCNtkzbEia5LtHpqfJtAfPX2AdqI1j/wOJoY4LaK0q+hMvhAoGBAKd0
+623A5AyyhJYmeosFYcSkYWkBxJNcsLXSGXEhwUMG3OOt4e+w+I8QDYccgS3cQY4Z
+w+HEEid/qvs4QYkDAjeWj/I0pDtD7MZxEfIdio+ZH+pkPV7+6VTWlLxaQbk9VRWm
+eOPYAfXi649GrmtGOaDrZlyckkaGeVMAA9M/0iNtAoGBAK1KI3i+kjxV07KJJjrN
+TGERncEEtL4eMptiRB9QautH3rJQNR6LJLTda2uxcG/hmFHoyo8OVYN5ZSWqYwFH
+foIXGsF892Xnm13vc4O0nMZEq6d74NqU6SGI3skcuta8alU+332SY6kVLQdtXm92
+wagjbm1BGKw0V1uz9cWLgB9D
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_donor.pem.digest.sha1 b/jstests/libs/tenant_migration_donor.pem.digest.sha1
index 804d455c5f5f4..9454a23078a9c 100644
--- a/jstests/libs/tenant_migration_donor.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_donor.pem.digest.sha1
@@ -1 +1 @@
-EFDD44ECA40D0E353233605FFF194228782FADC4
\ No newline at end of file
+CFBF2C663DC9AAFC6835DDB66A9B9C244B307D5A
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_donor.pem.digest.sha256 b/jstests/libs/tenant_migration_donor.pem.digest.sha256
index 498f714d523d5..3fd0e3f5c0973 100644
--- a/jstests/libs/tenant_migration_donor.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_donor.pem.digest.sha256
@@ -1 +1 @@
-5CCF1A18C3D88EEF873679E49946D55B6C8ED4B8F6CE72B70A388C1F7273FC5B
\ No newline at end of file
+71984A2DA870E8AB0F44DD6A11992780112B2F0D05E5BA4C39397DA9B11EB723
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_donor_expired.pem b/jstests/libs/tenant_migration_donor_expired.pem
index bc7c89bf47bbb..48f001a1c7c16 100644
--- a/jstests/libs/tenant_migration_donor_expired.pem
+++ b/jstests/libs/tenant_migration_donor_expired.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration donor which has passed its expiration date.
-----BEGIN CERTIFICATE-----
-MIID4zCCAsugAwIBAgIEdsgsBzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID4zCCAsugAwIBAgIEbZ0UODANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA1WhcNMjIwMTIzMDgxMzA1WjBrMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MjAzWhcNMjMwNjA1MDA0MjAzWjBrMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f
-ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCoe0ZBYmC5i1k0
-OHApTg3CFKIs/oV5IdRoDG5lKc0uowK87JGHwuCCRNT/9TfGhNJ/X86I1lHTbo/A
-2Q57C0+/R+Fj5fndFQLVGLKXwtf15/7/iDWS3/wz/WtlsnVePg54EiY1vM37LSXE
-nd10AAR9aXIs6FpY67Ku9RxExhluxnSHpw45dOkrvaWhRlaESmSCCYzdcHAjBLrc
-C3qiPi4OJ/3cuhCcEkgz0fvcTP5DqSYtz+n/yO0evu4zeNqypEjGIA1fneWVWsyI
-IXcIs9VzrR2RUzx3ejgq3JM6TollZKE2nkF+UerxslbqqvtwFYi4xUcBzFNMS/0f
-rn5bz145AgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l
-BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFGT9EzQ+l9vWpncQ68RFggFKcC48MDQG
+ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAox42YtKA1C9/
+OrFpDLJe/q8hJsaKWkdp1lSXwR0Ptu4p7m83Lxeo/+ciriHEAxIKLoMqQ1XBTdiy
+gmWAu9rj3772pqDfQVqNC/1fwltQyyvzoLLcN3TcmRtHUhbslpqwJUPxBy7/8sKk
+X4oKr8pMeabE/KUvsspjQ1rcH+wM1j1IO8rsrjlki/7GuqzvAua+sGxoQLvd1ex5
+EV0hqA0MVslOqEKoKdjYqMe2/jzeTjRq0sa5WEYRHTrDchoJexAFH4YUe9GnF7EK
+Bwlg7X2LxfOCHOx74WHi8znpMxYzPtVcWxFQn5Hy6ztanb+fwEh2xzQf06dKIdzk
+wy1aMIxPAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l
+BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFMqUoRTLmcR4eWyS+JeN+nR7UtZXMDQG
CysGAQQBgo4pAgEBBCUxIzAhDBh0ZW5hbnRNaWdyYXRpb25Eb25vclJvbGUMBWFk
-bWluMA0GCSqGSIb3DQEBCwUAA4IBAQA8Llv++htxTPUiRJ/28GIg10BEKogNtoux
-wq6k/CX8PfzNKZ4AQ2yLVQ6QVfM80tUEDcd9mRm8UY6axNtqKhEOSZMDvnMJluQG
-+fELJLZV0uHCQghVxGGurJIDRhI65yV9KlKOwwa4LYsmk3BILfYrSrTcBobA4W7E
-lJo799J1knsx2Hory0MbEgaSRG/04e+xvr5ny885e0NyirHo7hl0Wgqad4J/ki1p
-CY5U39MHL7dobq9fRwhLf4fXPlx+xOAw8oWxvYZu8LGmCT9HGJ6Gkl3knGI6JyPB
-qNlsraJwWy33dkOQ+HYWT9faOuLFQnQEc66sMkLTO0j7OrxuqgXP
+bWluMA0GCSqGSIb3DQEBCwUAA4IBAQBZNPp72UtbhG43ZQqWvyTE1AWpPkhUR3n5
+xbih8X9b+2EikLRg6brxLBPM8leiH2AkpFpj3/givgpbbR4yFkKEHfQASeU1tbqp
+qPOtB5+5l/WvqcGtz6mV4/T4Hw2GfqACkTrVuJnR3ZJF4+jWDjfEMLtyRYNTKvPC
+wyNYek8+9LM9yd2LYk4zt5NNCCDJ8J92Cq6wRX7opWGxwcAhs3ESVcWMn6tR3gS2
+JrV7iXsi7qZSreW33E5n0fgP5+PMbVvYKQB14gaAqNWMfl7XI0ckJyqgEWvlhJY7
+ZWg6lZNCUHFYPHON+Vqixv9UgylvTkJdmxazyG/kpTKKNISDTpOE
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCoe0ZBYmC5i1k0
-OHApTg3CFKIs/oV5IdRoDG5lKc0uowK87JGHwuCCRNT/9TfGhNJ/X86I1lHTbo/A
-2Q57C0+/R+Fj5fndFQLVGLKXwtf15/7/iDWS3/wz/WtlsnVePg54EiY1vM37LSXE
-nd10AAR9aXIs6FpY67Ku9RxExhluxnSHpw45dOkrvaWhRlaESmSCCYzdcHAjBLrc
-C3qiPi4OJ/3cuhCcEkgz0fvcTP5DqSYtz+n/yO0evu4zeNqypEjGIA1fneWVWsyI
-IXcIs9VzrR2RUzx3ejgq3JM6TollZKE2nkF+UerxslbqqvtwFYi4xUcBzFNMS/0f
-rn5bz145AgMBAAECggEASn+wEithH8pZWHj4R5tnBAHBsGmfNW/bD2Mn0X3JZ6zX
-AhkNRN3MDm5HTSLuVc7XkVEY73LtXbOOxzRzbb8TtT4gkN7ZPIQU64tvNSIIAoli
-PTXKkraOO6L8QUzHDzQbi8ZitPJWlpFhGO7BRJDZa2ccAHWBtb4LFcf2hwadqlhI
-xt6psHS7zZ079U6NY/3jtPIr7OZYJpYPM9gyyzt4UsPbFzrEqFz98tHDQ+3hnVxd
-pRccFdqbw3R6IG3MLrRWMJUuKbqGtCjnhpvbbxEddp75y9BSK+k+SdkoTSdTPVVS
-QDn8/tXtQJ7QU4SjUp6FXWQey3kfCzSI0n4wLkmcgQKBgQDZNn0qv1Nviltm2fwP
-0PICNsA6jni5fiPrSaNAdOczOi7ZfmBVjhLgz9n96AXgjCc+xFnmjswXv4qbc2t3
-u0pbzJv9fc4tNB0Eb/jhjJSC6AXJ8iCOyXDoyFddAkjzjH7+8jc6RVFswnka2Qbt
-RO1YfxZ5LyOUaQbq/cAzGCOZMQKBgQDGkR4m8O9tpgDszZxa+oSnWc09kxEBNBfE
-1ub9QRBP1dpvuL/56EVUt5W9XON2xSNxLsdtBoOrgdBVx/WZoGBJfmJxMG/FQPTi
-6Ok3pqTwyn6keOtV12t/gLO/dfpTzrKYQyVau1tbxXHjL/EUi2DL85k60fZG5ter
-qHjpMa/TiQKBgQDPBkcP+iDM26K4CaVbcbtnbsWSSf44VUho1dt58LH3Okoy02d/
-w5Ssno5XmNAZL5usEDrbK1jMfave84gHKwP5MK+wUDq1DMlnVE1ys6lMh1YVHuf3
-D3nE9EWICUh8kHjuBu+qYyzzKvuAIj+e3yYURbSmtc4EgoGscUPBrw0m0QKBgQC3
-KDZ6zvTa3CuwQjv+A2SHBSN2r4gY5xchnuS0J+bG7UiukuipuKDy8uAxKlQ6Qr9d
-cDvNihu8AGLOLUKS8Ua/o89j/ryqYy8/en1cst8jqHTGey8AIFNs6adjbIx574f3
-QMBc/8LWVLqnR5qFS8b+eXUWt4MGhXncQes9lnJLmQKBgQC4OM/UziUXuDD7Fl6f
-79rd6JxbnfKlMhMnY7wpeeqyt3ziT5OQRhlkbfH/r5H+Tce2q5+xvXn6s6LtFUlf
-jPQjXyo/k0+k00n78hej0Q+CKYkPBvbB3VL1xoZa+o5QP/wQtm6ozTxpH7fuaQx9
-9W+75loYzXxN+xu1EFPfuLPpcA==
+MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDAox42YtKA1C9/
+OrFpDLJe/q8hJsaKWkdp1lSXwR0Ptu4p7m83Lxeo/+ciriHEAxIKLoMqQ1XBTdiy
+gmWAu9rj3772pqDfQVqNC/1fwltQyyvzoLLcN3TcmRtHUhbslpqwJUPxBy7/8sKk
+X4oKr8pMeabE/KUvsspjQ1rcH+wM1j1IO8rsrjlki/7GuqzvAua+sGxoQLvd1ex5
+EV0hqA0MVslOqEKoKdjYqMe2/jzeTjRq0sa5WEYRHTrDchoJexAFH4YUe9GnF7EK
+Bwlg7X2LxfOCHOx74WHi8znpMxYzPtVcWxFQn5Hy6ztanb+fwEh2xzQf06dKIdzk
+wy1aMIxPAgMBAAECggEBAKHmdfXviEuOCX08ru5DJYaBNl7+X87XoT1qTR5dxzb1
+36SOKBqREamPqYqUHvzGN9smzEYw4VndO0qMHRLcz1LFMZHK6Vm5a4kvkntwtZ5p
+oz7WyHwcf7MHWs3OSEX/LXLtXvSrvOyP8taDFVl19OfhNjBIxewYr9BNQ4fNrG38
+CJeFcXua5bI7EEKWeRCo9unQs1/Ui0QQmxevsBxVFn8LB2oMaZRxpP4wf8Iw/tO3
+UilNwhG9FRpklFvjvn19VIP9b2dv2vPsfZ86tdmvBFrByjKpZ4ATSJYWj15mIeRv
+wuP/A63JqNfB4hK3PbIV76TWeZpagunqECddgUN4pcECgYEA/IXoSxPziogHLwkr
+bX8B6NLju6oAB2F0juJ/ycnA84ASOpHgdmBBiTId52ZjV9W5WhT8TdpNhHFmlg0V
+nwv2bTA9fj0cK8SDIw8moozqUpWDRPlOc7lSqp81tuwXDdfSQy3vu0XsloFeGu+F
+4DuX1popaCxkLSehLPEiYJBHyxsCgYEAw0of/012E2DiWbwdhwR2mfZS7naaVkzh
+bN5kU18Ef3ICkPHb5Y+UdVqqRIIz5R+IEnDCFKFSyKddfHEXFlUHBmZLNBpOH8BD
+fXQp4y626oROL2y3mDWWJjXNB0wiKpcOikf96QBDZ5AjvljnrramCdmdBeoL4OLS
+0Dgg9t/gAt0Cf1LbnV0FLuRFvhWw7I8BHZ9Hk3IAVgIV+CjqKQQNL0K2w0R0mTI+
+Y0KhLy6+adpoRmlIeYESIF1U2FRj9rQ5OKKbMZI6ewPhdyYJ0qODmV9/r5LyOsu+
+A2H426cp0Ga6akOgzwij6P15dWdhMIxjAW9pJi0IY9ZtKnWUqbMFpIkCgYBto94W
+/aw+8PPjoWOKfA0CH3MsAlZgAUXGU41L6VqjGqUqLz9fwJ5+zGovkFIGXw+MGtY7
+st7BXIV16iAmH5KUGzY5iFM4LD833dHuhDmZrLIgmg7xW5Ry213CrRG5i5lUNVru
+R4GQUTJgGZXpVw4dFZm8ykvk9DObRwfq0oH32QKBgDhdCAjpr+wCokHqGCNYWF3J
+NJIVGKF/gUFjROZqkDtOT5jVLEwKrGy0GUlnY8XDzFZtPa1cKISGjJ7s5rWKhjW/
+w4QS8Pv8Ms64TEIb/J9xqUrkguijBW373d4nJESGqEg5XgZbcivuP710uYHTf28k
+kAYbuWRygT+Bm9JyNwg1
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1 b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1
index 11bd90ecf3ac1..7ec9cd87b21a4 100644
--- a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1
@@ -1 +1 @@
-1B3466B84D702B4AE46FA78B60B73763823312D3
\ No newline at end of file
+5F48E18C9FA1815556F64F6CEA54D507D6758560
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256 b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256
index f81e94de59503..9009313f52656 100644
--- a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256
@@ -1 +1 @@
-AFE0FFD75316145A8DB71A6FB4AB77EC379857F1D6374121F6139357EAA86110
\ No newline at end of file
+470FE82061694EB9A894488792DDFD4C290C471AB11970E2DA42984E26BAAE68
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem
index 20b487ac64bf6..86baaff3ac850 100644
--- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem
+++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration donor without the required privileges.
-----BEGIN CERTIFICATE-----
-MIID2DCCAsCgAwIBAgIETPHVrTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID2DCCAsCgAwIBAgIEX0I4eDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBrMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBrMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f
-ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCukv88tmaxJrjz
-RBGNRwU6d7/GTet471tkLQtcY2+39zeSQiqDT0Ru6wcqnXbtfSBl9/RZoVUiMYgm
-WDH8PpdxFBPQ2UsuNqChEJGuoYHh4G0FqYbrdhFGEuNlT+NqgSvSDJ9cpo9tl0WK
-Qge+Bap0DaL05XH23Q4XQh1pZAHH7r5a95tOXIPcshxJ1YneP0FD26kjeQxgHnTB
-HR6RNPLElwuuE6EUPb8UwNRk3pRYbmv0OGgRGtbnH+Ols4ir0GJK/vtZZpusS1Om
-qgY+gSHXJ+pF/xk3jv3RZJtlnoXEwWIucMeXaJSEVBMrq87kS0fwgY4FGIqhSwp1
-jrc5liBJAgMBAAGjezB5MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM
-MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQwzCU8euanRmnO3RrjjwA1KLYh6TArBgsr
+ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDV99dY9BjryoLU
+sRX01PinnpONl8iK/B5KtkUKaBuPul1NBF6EBvo3GsluWxK7CVxaMrf/E4P5vHtB
+3Qx9lakRV1hel51r0rmNki8/cC635GF+w+e0u0Y5BW1cw22K9ukwyAL0BPW5PWmg
+438a0K3N00wlPLSWBPiQAn2lnXUuHOXuOF9yDA2pPr9SDcDsjVHic873kDXh/+aL
+dRiXBcp07qIwt8IedV+sKmpS76ItD7k+XfIyy5dQAGwaS03O/3Z2z5SW1Nfo8sqt
+u2fGH1RtmXieU46jz7Fu6zbmrwafXbLrUftkaIDx1LWloHbrHtI8uKUPq272USWX
+XNt+VoIhAgMBAAGjezB5MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM
+MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBT4Sd58ZmKhJCMacxqVZgt+oqeOMTArBgsr
BgEEAYKOKQIBAQQcMRowGAwPcmVhZEFueURhdGFiYXNlDAVhZG1pbjANBgkqhkiG
-9w0BAQsFAAOCAQEAS9b6JY6ebxXHCzzKTx/GOi/OCgWrqlDthNtdVjluaRvSxBGk
-v+goonF3Z/8dXVwTLysRwz2BiJqROHaN6x9JvODPSlYyLOyIE3CafDUu6oLm5huC
-BS1pJURujRFdHeRCdLhtzgcfO6zIGRrkiMr7zSZYSqMBfkEaJLl2c9w/MMVQYDBf
-KOzC5rV9OEkiJcI0nonJMM98L1bKNPRdBqNB+8QlNxOeJXDmNAyG+vI3Mt2c452U
-AkXYYBT40e+DdiDzoxIA1DKo6Auc5IqFv9q+dTJVKsCuJCzV8+5bZS9DR286wAVf
-hG9C1ov75NeIAE6w7iyt2lV1FZRNSP2zBv6Y1A==
+9w0BAQsFAAOCAQEAcz9ghdMtXd5pqD/nJbms1k/mP2ufJ0vXlAZ4cYNlBVxQ5Jo2
+lRNQaRHey+pVn7ItkKuHpD+WUpEQ1f9yUKo4Xhc169Vn+k14jvDJRk4wBAGJXck+
+B9TNvdimyA8X0UxgqG/3Blh7c7n6jzXQWC9e9rm92i6RFPuLKg1i1f8TupS/evmd
+pe64qgy/mQmlexZt7DKm8la2aoXVkH8zxnhUVHd/pbN1ExPOLO+e8ZFqUR1EVzFh
+sU8/Vc/D0IYUYYHXPYD3fEV+4uxl8o6WzoyWLbkbRKWKDRnAVkvPacOwsL9I482x
+RQjm0kCzFRZi9E0lj0rMkVKRsEhDHt86WxvnKQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCukv88tmaxJrjz
-RBGNRwU6d7/GTet471tkLQtcY2+39zeSQiqDT0Ru6wcqnXbtfSBl9/RZoVUiMYgm
-WDH8PpdxFBPQ2UsuNqChEJGuoYHh4G0FqYbrdhFGEuNlT+NqgSvSDJ9cpo9tl0WK
-Qge+Bap0DaL05XH23Q4XQh1pZAHH7r5a95tOXIPcshxJ1YneP0FD26kjeQxgHnTB
-HR6RNPLElwuuE6EUPb8UwNRk3pRYbmv0OGgRGtbnH+Ols4ir0GJK/vtZZpusS1Om
-qgY+gSHXJ+pF/xk3jv3RZJtlnoXEwWIucMeXaJSEVBMrq87kS0fwgY4FGIqhSwp1
-jrc5liBJAgMBAAECggEAM9AHFwLf6bYfcHwEZZTtlVPTREz+FU+dJVdFJu+QSd6C
-zNL8gSp2miEBaIGBuazIf9se17dNaC+hRBgrBb2h/vrBgtvrN1UZSZR8WIMw5FRS
-pzqQg7PwHocenIQgcAzUb6w1ZC8/JHygNA+y8pAF7hnaAqtSrr0fIIIL7qqy6J6S
-vVCxiOyvXR+VHmvrltLIpfBPsg3pWLoygE0Nfd+ab93/kWNS1/TO+76GacaFi9pf
-f7jIRPny7wDBpij+ABtRrv0HPaH9Kh8GwmLfozCIUv3RbLX3+hVePjMlB7OC2JK6
-lnvHVb5LNdtNbvnaHleutchA2eov5IgXT7v6Wn+u8QKBgQDUkSKAUZpULQJIVBoz
-tJcnnwFbhvHE9Ysb+YTiPMKWtFLWctMM7ZdhfRtBlkvPL+d+xSvnOhiEjBsO1epz
-s5yfGoUM88kbKQLK+66IIQJBQvDUzB7J6WTrd9j0X1hg4CNWZPUP3E/vbnVF4Bjg
-DV0Zh69fGdMJGhOnLO7AOU5/BQKBgQDSPo3aLE6t2lEhH9jsSJtoD//N8TaYVise
-oGgAl4COemto3whZxutYOnwMLR/yaj4MQZ2eoIS87EwQkKEVKmiD/35qjT2mrdgq
-GUP8EVvSb9XlQmVwsd6gf0zUpL3nj3TSo1LrjZbmz5UU8QilbGR5YhU7TTQ9rB7s
-TtwIIXw3dQKBgQCOVGfPl9RITKLcn0O0H1Wet/8GtAKqqObuYlKx0ZFRq4hUAyqB
-24yjQEvBzMsR/bd50Lgm6WWFSvLLKj0EH93dQrqYA5xCzWELXR5uE/wYiqQLOGnd
-NKPYbrUcW8MniqzeqlbUueXkIgfwM680Rn5yG8I3YWlHDOjf5Vwv5a9DqQKBgFVQ
-cPuRczP1HWrVo47uP7HQnDsToNXcUY8SBGIJGG+4mZFqv9a+c43P3bqLLSWPmzIa
-Bj3yYSrQsGUga11NYi/+I2xVeCkE7mzW59GHsb0JCMEJWmSKqQ5z2deIKk+m1P8M
-q39Oa2ep4JMo5BtMitD+ziLMR2CnUb0Omxbpj6BpAoGAU/TBvFbx9x24eUrGpNql
-peRYIcUkq91PjW2LC8X8IK9VMGnznpynn6lkdwDoMydrXDZ7gG9pik9rEO0aVE8c
-Yo63x5Hq4+tSEFHiy7FuK7WA2XbLo67cYI/LFs0bCey7kUWG7xkmE4eLhVVOfZX5
-WfSMNStPHfPy0LrvK2JGs3w=
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDV99dY9BjryoLU
+sRX01PinnpONl8iK/B5KtkUKaBuPul1NBF6EBvo3GsluWxK7CVxaMrf/E4P5vHtB
+3Qx9lakRV1hel51r0rmNki8/cC635GF+w+e0u0Y5BW1cw22K9ukwyAL0BPW5PWmg
+438a0K3N00wlPLSWBPiQAn2lnXUuHOXuOF9yDA2pPr9SDcDsjVHic873kDXh/+aL
+dRiXBcp07qIwt8IedV+sKmpS76ItD7k+XfIyy5dQAGwaS03O/3Z2z5SW1Nfo8sqt
+u2fGH1RtmXieU46jz7Fu6zbmrwafXbLrUftkaIDx1LWloHbrHtI8uKUPq272USWX
+XNt+VoIhAgMBAAECggEBAKHF5w2cUJNF47EHyr/3naCt5oWxcrCSehymvoBlwiDj
+GNO4L/XGiLvNBurm9LxxEWAo5LB4gtn4xYBArsbsto0j6u5p113ETwJYulxOZyQm
+Cgib+b3NsgQ+tDbkkxf0Is79Ci6Q6XAKnKhGGL2fdSkfTn0A7tBWJdBL8c+bwxL+
+6G/0t/jzErXPNjHo9bdUagJO0Z09SzJbPvIUhEmfbPBPFb6NwTEbcTmWz3rUjpCZ
+5dtxAe6yYOja0eSfJ1nehx/rnFyWJoHpxI9MEopZ+eR3b7qlmlBeKWAeyLEjIHaL
+1n8TMgpoXTeOhOx1wPuFMNVM4e4mhVBko6UU3Ff7qoECgYEA7Wn3/UJhd+tQ07Mf
+Xid9dpLDcmo0UMO28PP4fT9GvsQuqMegmt0hCmu8pKlRJoFsMU4Ab30CNl8JtzD1
++A6WSM56QIDocZOx/RXtQfANbeRf3PHkZGI/2dWLxlYRvcM3dF28Ttk1RI5HDABz
+8Cfkc7py5lvstuGIF/rXuEY+ISkCgYEA5rf+QLLb5/T+i9FcT2d8U1iSLeNYcQdl
+Jr/Rt8H922fcXnq7QMNODyW1Ha5PYHUr0iTCamCe1vqjBBCqaBuphEwjdb/gv7FE
+XZPqst59GAIVTqlL5MGq/QDLgiGjkbLt9IMuUU4xKTDJ6XJHyamEw9JFn/oPB6ag
+ImHWHCL8IDkCgYAQ+qYor6mm2OZz7XiN1EctxrtBoITTTdv0iY447uCRXhh0K6q5
+yJzHUQMxx4YtOc5SDGENmCQjXVKljlPZBLoMxO+LU2zMSqLNFdddAt0DAfU1KPuL
+jldArfwMGDW2m5KIuPdxsCkWGRLNse6Yu1GkUS5MeUCJp8GC7YamACgLWQKBgQDO
+RAp0DzOY8agsmhT8DFnTLakqi0lDa8gb13wH29A3umgCs5j7MmB0HFMK4Q6n/rkZ
+m7GQZyc6rULWXIvsnWZ1F5jQKaYl2rZzLs2x8kVGFH62H5bxQc+7d115ztcsaBpi
+8nCTUeOTnsG1Cm1xtjMy2qdSeb28KlSZyHwiFYjwyQKBgQDrF5KHhheR/eX+h7m+
+Jn1m6cvODkFtB/I+3L50rBkR/iCcDQNgZ+HGLKr8LxQb0sj8eOlbpPs3KjrsYKPO
+T0CpmDDHLdiihIcWrWX8SIyMmyjZfUTtI5rNqxRLcw+hbOwj0yBRHJ2Dcfu8aNCk
+Au5feocGvD5BuFPV8b3HQWAESQ==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1 b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1
index 916325df58371..50b1c4d4becdf 100644
--- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1
@@ -1 +1 @@
-F6F2FF0DD2BB3AAFDCAC5D9CA190FD4AA201F049
\ No newline at end of file
+D1CD97F3C9B816D7E8FC865E9CC330585A557F42
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256 b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256
index eceeace3ad087..b2300a0290b05 100644
--- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256
@@ -1 +1 @@
-4085D0DD39563975418DE7088089C069DC1AE39B1EACB8DA2130C1CADCBE0B4F
\ No newline at end of file
+D405DB53817A6293A79BE7C00EF272471A42473AAEC6AC9FA10F8D2299C71CB0
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient.pem b/jstests/libs/tenant_migration_recipient.pem
index 38a8f545f6e2f..b2783e16265e2 100644
--- a/jstests/libs/tenant_migration_recipient.pem
+++ b/jstests/libs/tenant_migration_recipient.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration recipient.
-----BEGIN CERTIFICATE-----
-MIID6zCCAtOgAwIBAgIEJcofjTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID6zCCAtOgAwIBAgIEKzC/fzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBvMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBvMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f
-cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAznSAJWeK
-aX4D4hQE3Z81yntvNjbWntvmEgSQ6+Bh5+10hmpsdVE/oQD+IrfDRl4PZxNJSOvH
-3ZI47begaYPl1qPQizf0TAHve2RwXo5S1hdKofJi5CZ6xbAlwvxyN0/CBk8CaJ1B
-UyDWmRpsP1Qutwr0LVK1Sl1r//hy5eEutg8Qq8gjTcyYE2cWoNGgPxV2Eg4ds4sA
-Y0NZtJt80bZQhIt/LT70Z69me7Y1gCKgk26UydXZU6xvctL7klRuDcAhiC+hsmEZ
-WZmM8El9dtovv88WwtGG7qVakW1SJ+4hxDUCIZMdriq49FXAyBgnD+Jj/YaJgEhC
-ZBNNnur6qqJK3wIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
-A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBSZXizXFINpk1j//W0zmqBVh745
-FjA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50
-Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAKbaeByk1xCNdW/5anhcxvY6
-8Dyu3sqUhVyCP2VmS7JPjtt+HfzQGVyh7vKLYPoVt7vQp/2fKPvDnahBTbOeWlRQ
-UIO86xdqaEX6GskRu4CMGB+1d4fnQxjQZkBwwQEMMXP+ooeBCDuwp2+zYoOgGdvn
-ZXL7Ui8xEuJeqyczZAhcsk+Xnk/ZW5b7EeLIgjHhrkxxB6MdNejA+xlU1qKhA2xL
-qQeAtYDRhl7zOzlBj2Wx2mX+6Ph1IW9IHtgMxPyjjgeEoblSxRf7nqOIGbXs8mPo
-ap2I7iSNjo1vyykbWA6vJNIS40z8A8EhSmHSPe4aw326CYN0VeywCOy0TNcHu0o=
+cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1QP5Zls
+94qZiqNtDGshF6xcws4kcJqdVCrPwsI7bdu/jx8KWQ9Lj2UPXXST1B5tHCLXjWkv
+PYhNMxqAXXLLzKiwCSlT//pN2ja5sy4IVC8olXMCsXa2/S7Mlxzoi8XI6Z5U+rGp
+7Zwi2y3kxAe/DY6mJvZgF/8N2DYnZUBsb+0JTOE9zmonKadKEttOF+qnNvo2HYEZ
+urKeuHvWv6lOuKpPIodTan1k4BmbaoX1XQuE1H7zHlPtpP62D5qI+UctB2Uyd3+V
+eAObDMHic5nlxVVG4SvdZQvM5TjrxeT8FREWXIvAtHI2KBLlsWqMO+wDiWJPXB10
+VjMSrwOB39U/wQIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
+A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRbjCQ7AhUFIJEF5kCQF+pawaus
+ETA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50
+Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAAAlw+U1ZMmEEv/cHQnQlPdn
+dVYc1V3hP4tDugVavCWuDzAps2+qu8QVLL7SekvW0OsCCS6jEcM4lS5dIVhx9fl7
+UOVMhrTd+XrKTsaeweOwq49aCDXkgaByGtmoXJWjL28wsrp3dSkObviaKKekFfuC
+d8j5Ea9YCa5M8EwuE1M3P6OaA1E2OGQkDEPFrhwfrKBdO2RZwwbH5FEUHvJXLCjg
+BIP99tweL1UGHH11dUonFuIjVCU/FcWTxksjGS0l/1biOsatSBt7Xal8Uv76NCnT
+zNQRFsURJRm01mIZxf+djCrSg0JUCKnrJOMFcg12yeGzRw6WPtxQikW2pxnOK9s=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOdIAlZ4ppfgPi
-FATdnzXKe282Ntae2+YSBJDr4GHn7XSGamx1UT+hAP4it8NGXg9nE0lI68fdkjjt
-t6Bpg+XWo9CLN/RMAe97ZHBejlLWF0qh8mLkJnrFsCXC/HI3T8IGTwJonUFTINaZ
-Gmw/VC63CvQtUrVKXWv/+HLl4S62DxCryCNNzJgTZxag0aA/FXYSDh2ziwBjQ1m0
-m3zRtlCEi38tPvRnr2Z7tjWAIqCTbpTJ1dlTrG9y0vuSVG4NwCGIL6GyYRlZmYzw
-SX122i+/zxbC0YbupVqRbVIn7iHENQIhkx2uKrj0VcDIGCcP4mP9homASEJkE02e
-6vqqokrfAgMBAAECggEAH4CHTS3PYOlpjkWfuHFis9LB1XPoq7TCFl27/0HtroX9
-EcWLZxtOqjKMlJ+VnFcd4ox+0jGn/ZciOKOcRn1pxKaaW6PeEvr4n8tjmgn9ec10
-BeIGVOnKMf/+wxHiG65/3JaRt4THQvfvxszRV5hwyF1ThNbp8r7ui9BNN+Z0SjMc
-GOTyf1j6Y9dcE6I8pJVtNp9o7fyYYrF9AE75QWv+x8DXSuIhcEFkFzmyJJHk1Psi
-7T5JfmmI53KrvE/9n7MbkWQIC4YGrSYwUY0jKsu3xskXVOYAOW7GPLol+OTviJBq
-4IVAckfA/Q6qOlnYQUkKqaRn5gjFmARhUEkLzmDaAQKBgQDsLbZvkOVsMDYrZtCv
-z0smeEq3z/sHUK7DJsOEBFVlSAG3TcjZW6STNJaFetZF26H4pAqbFZqKIv5J5V3D
-egvHDxSfWynwop/VydyEZlTr/Ymxdm7YThnzvqXbr8q0X0QVSnhCtyKe6WJeoWK1
-HXWcWgSDK0Rm5Ul5Y/H61Y31jwKBgQDfyC4VHN2AWLOVVrgF+IsvwML/9xTuCBZw
-ak+9Zm8/7X6++J2Fe2GMmyYAWMDZhJmd+yoZDajZ0WCyHHvxCT2wyJQCWiHNs8dm
-QsDR8w7TDgZdkUQ9l48YtHV5QjOYd1/WsqWvWsOdZrXBxEzuLjF9140LY75WWuXa
-QULT0KjNsQKBgFzYy7OoXsjdWy5MyRWUhJKnD5ibZrBFg66seohXu8qJOEN7jM4G
-PPix38qxs7La2R4KPzEgmRRdFWKvjODgBelHgG/1QtOa2wMMzGgwYoozrgHZ8VGP
-wpwUAtgOj73BBd7o28Y+gJnAXi+delSBNE83BDcct1NMKEpUhDh748b5AoGAbxK8
-FMykAL2GPXyykHwiEcy6vTPVlqQ7BwctlxPN6kwmWgGqpgNsks67WPa0mgKgAPMW
-nSLTiXHMbfuHZUcx8JpOJLC6EJDISzCc5lifJhDTBgRBe9TLOPbxTAOY+ndeAYHg
-jR4e8/R0CL43dQ1GkXC42EtkwRYS+nbkBaCO+LECgYEA2R+GXgZ87UWwB+3X3uAW
-HQ/mP5PfQzVph/K35DbJxfy5A6kwyB/BWEqxoaZH5Deoj4pZU7NmpB0smEx3O4fh
-UgmyPlkayz1gbEHJ1PWe36j4bi7C36yyZbwc5iqJRvWv+CqtWAbHaammdQ17d6Dc
-S1VNokxppp+iUUKFYjL/vs4=
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHVA/lmWz3ipmK
+o20MayEXrFzCziRwmp1UKs/Cwjtt27+PHwpZD0uPZQ9ddJPUHm0cIteNaS89iE0z
+GoBdcsvMqLAJKVP/+k3aNrmzLghULyiVcwKxdrb9LsyXHOiLxcjpnlT6santnCLb
+LeTEB78NjqYm9mAX/w3YNidlQGxv7QlM4T3Oaicpp0oS204X6qc2+jYdgRm6sp64
+e9a/qU64qk8ih1NqfWTgGZtqhfVdC4TUfvMeU+2k/rYPmoj5Ry0HZTJ3f5V4A5sM
+weJzmeXFVUbhK91lC8zlOOvF5PwVERZci8C0cjYoEuWxaow77AOJYk9cHXRWMxKv
+A4Hf1T/BAgMBAAECggEBAIPbRtzSPnQOGn3MmAGECJMZcQ9owFBA51xbKa6jQB7v
+I+vwU68QYCKObriauoOyUOkw3zhrYVWqCa+Jk2q92rUazcBxt2B42vFEDFnMVLQj
+sgwS1bBBYNFhGPPJqsdzYfGwzHQ9/LPy+lfVJyTKHwc/2wehKXYWQblKMYtYL6Nl
+lYoBJg+ukhI/k9CssCWUzu6GT2phCPmm2tH9IODyPZYQj9ZUG2PFcVDyR+9aQKb4
+RaaOzrNzt31srGIHNjW7QD0yEwNsTiZ05DTAS0ExGt6TnHQcbqSkxT1djZVurZhd
+0zT3rGEhsAkqNQ/mMQ3SRyB2pwhXZ+/aqqGl4bsSrLUCgYEA8nAXqzr6nXHBQv9c
++kwiTuINVYO/Hss1owTDzPEv3E+z/mVstOo8KUNBT1DiHARwrYMJ6nra5lah41MH
+F+X2/nUqYC/yIrtrlCOnQHr6d2znIpWq1Sj9/8MzfMgZuhdUmQTrqc2Tmno0bh5K
+eBPxUrNsftGy4GlY7WVLkuCAzwMCgYEA0nqbSedoOqCwIdkVgi2libIX2TWZpBej
+LI4cNLA0YPV6GqG3hFkxs8D2XEIovEG3q+3dzgqmCXFg24HgaYFr11b4m3XYj/3V
+CiGC/G1c3QxvkJCieseRtsyhS/H5++X9YFyZG5Fk5JzkArO7Nae890o+Nom6j0C3
+q1j8XtUqaOsCgYA1k4X3hkIqFyCBgNN4UOjoC9ashj/vOzMwQnZOzSIpiseZOarL
+VFRVPhKpx5MgY+7OuX2wftPvQUfnZ8rSgjSSSSxDM3VMLaT5iOOGQWcmiz+NCgxF
+rRhstCOluMbOtCcy5b56uP6cjdMWXsVQQf/7qcEZSZhBNaa8V71ayRhOJwKBgGK2
+Pb4bWTalxfUZ6oxy+//oFmc4TCY5L73lqDTNrsMKXYm/7mGTs0uqY+BA5vnjmFB5
+7lyDgftLwTExaB4TJJCJqW6/hiGB2jg2H0hjwfmpq2kRbCJJFn202rDTe3o4Euzu
+gJ/9QGQ3cHgT8ujnEiioGSVa0rP8lic2RKX1Mz6hAoGAVJcNEifTD0GPB+VKl/vE
+dDSUHz3MrlbI5o6hORCuG9Gyf/wzUaCuOJrgTwBsa3iMYh+9tK8TGCZYw+fQhaun
+JWRmkU8tjqHMsc1PssIcDaZpbHWoYDt09jYssqPjrQir6m6JYeo8AZYZOaorKXrg
+QrF3oemjAKBVppgXuFrLzo8=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_recipient.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient.pem.digest.sha1
index 7dad2694bdea1..ffa61296b2f3c 100644
--- a/jstests/libs/tenant_migration_recipient.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_recipient.pem.digest.sha1
@@ -1 +1 @@
-379766C97E0E06045E08F4397740FD9EC54CC14A
\ No newline at end of file
+11FA775E0D82623200531C010564BAB34F3EDB9E
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient.pem.digest.sha256
index ad01d1b9815c2..659d908545581 100644
--- a/jstests/libs/tenant_migration_recipient.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_recipient.pem.digest.sha256
@@ -1 +1 @@
-CA5F02E7CFEB7509E1D6BD9BEAC00A40A80527CF07FB2029442DE8296A4DF2C0
\ No newline at end of file
+A4FAA454C6AE379571ECEAB1DEC16A9222B15D189BD0B731D72C2DBD82C2874D
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient_expired.pem b/jstests/libs/tenant_migration_recipient_expired.pem
index 6abbf4baf0f19..2ff44ea00665a 100644
--- a/jstests/libs/tenant_migration_recipient_expired.pem
+++ b/jstests/libs/tenant_migration_recipient_expired.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration recipient which has passed its expiration date.
-----BEGIN CERTIFICATE-----
-MIID6zCCAtOgAwIBAgIEVJVyCTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID6zCCAtOgAwIBAgIEUUGPJjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA2WhcNMjIwMTIzMDgxMzA2WjBvMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MjA0WhcNMjMwNjA1MDA0MjA0WjBvMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f
-cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqcz9uXUw
-mmYzgMZHQVct/Aep+Pt9bXl4UndRN1Fy1d8QKIshEqMVUuBh9WsI5rSg55RfH0MZ
-V4bpq8ah+HjfL9tPAw3seYPRg2rVfGre5QXYfardZnxcDvVrQ6Ln+/eTmtgQM7qX
-YCSJu+UskcwfObsAXEC9S/QSTcLyWtypNBsggCzm6hF/qNLoDdq2T9D4rAhVZsQ3
-k8B01DgywjWGSwTeOdnNaB1VZW3QKE9mLgbiQhS++NR+KRtUd5/TnFLs0tU3sgUK
-osSS/tBx60wR2zUnX190uY7ljg/oQkPpcdJ/Mc1BDUKu2cYGL8w1+pooT4CTSLVQ
-GRw3kZvW0/1LNwIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
-A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQaGcRjaxKtM6ejvAJUx6b7VpyG
-JDA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50
-Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBADcRbGHg4C1yyFs9nVvODhED
-U8UoXIPv6GJvQVS/hrYgQutjeESTUDEP74MB8GVSVtzFwiI0Ls+2JI8iVOGuAfbX
-XJNvKTuxBa/UY3IxZC4bTvTWEXhfuLaOCnAE2srHK4wrZgOQQgegQc0JYrzwjO0W
-pvP7FP6BhFfGo0DIRIfOvyMgD2Z4IqinAsdCOrbL4lLSgt8HXRh2R8IFfNSoE1F7
-fkocKa22emVFOEq6O+hNNaXiWSMYDaP0GSSKU4ywSMpDLbF67kbivGmrCFHO8yDJ
-5ST2qCxpbGF10pp/0K7uThoA8SIKVVpm7BWoBMnOkoLmyPc0JhHZUChwbNFq6H8=
+cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAude6yEIe
+i5zDr7r6m3W4qKs5W2tQ57o5IZVP3uWEPrNW5Zn33jQNmgeUiD2MXj5JjgZBuJFo
+KbBjL9n65sn0pVVZPr3LVH9G6grr2jMdTyzqGYeubvsE1NPgcvd9roW2uKDCL/WK
+jRI2GVFuVVeW6u/XTNcy5atA/jm9j+LTvK/zNlal4U3rsta1QsRKcl7BSsyBws1n
+7fdMA9OdYISBsrG4MtB8vHMItdIQAIfrq8KBtG+F+7rsToUNVhrn94vPUqqyUmG2
+3/5ulCRw3gI4wcEVPQVVFt0FLWNm/3GySdCayiu1E1BP+enkSwkoq7goF/rQbVD1
+D8QelyqOk0m2LQIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG
+A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBSnXaXIw+RLmwSDU5RyzZF/ONRv
+fDA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50
+Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAAk4lcSNw6ybY/Pf//+pWI/E
+176XpAM1gouNgFxpVFHt41YrLfMnyEGcqo7s3blkZNmfhzocpdZfNb8XOrU0v24S
+J2ABfI70kVDesqofh2bOwi5vkg/FacpLCz/bxMVUr8tQ5vCcmjManJsgZpyGVK2Y
+1ZEsIHkEVZ393LXtaTQIaqVlHz3M1d7crh7nEKfcXI5ntc4WOoSkzuW/yC6WuAbQ
+lWyy8IWTQpH778x8pDYWBgrw8BK+yjqOLT66Ca9/ZlfXPoQXk839X048Dc1qQFoT
+zY8sw9Vv/zCFzLsqF03yJCkznG5LvEsPAtQ0WZtpFLV1klI+RUPQ/bQjbmPXxvk=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCpzP25dTCaZjOA
-xkdBVy38B6n4+31teXhSd1E3UXLV3xAoiyESoxVS4GH1awjmtKDnlF8fQxlXhumr
-xqH4eN8v208DDex5g9GDatV8at7lBdh9qt1mfFwO9WtDouf795Oa2BAzupdgJIm7
-5SyRzB85uwBcQL1L9BJNwvJa3Kk0GyCALObqEX+o0ugN2rZP0PisCFVmxDeTwHTU
-ODLCNYZLBN452c1oHVVlbdAoT2YuBuJCFL741H4pG1R3n9OcUuzS1TeyBQqixJL+
-0HHrTBHbNSdfX3S5juWOD+hCQ+lx0n8xzUENQq7ZxgYvzDX6mihPgJNItVAZHDeR
-m9bT/Us3AgMBAAECggEACc0qbDUmjBMQMAPCAzSME/tBRX0G3XXgiyoWBxJthYjt
-vtlhHFlLltAa93apOd/9VcDLyNsvsEy6Wk8J9HTNtU58fhmkp9MDGgnlrtvgZ+nx
-eDePtiXBjp9+BJZ3u6Sr4YlNTh8cM9GIHc95xiS5PLTdrXWTN7osAE8bfEE2hypd
-mSVSGL/PBEo6+jsaEWsZW366U7KC/2vl8SBsxlEAXneKv+fw3BhngwghRDsQOFBt
-XX72GnRTUOt1nroMlA+s3Ao3XZrqD6Z145pTMqXen5IoCjBrUWk7n4uwxi10MY7k
-LbxOxh2oghwDbbTYgi2bgkYHfIk5mEsn/mxhAXjjCQKBgQDbR2HUhAusirpvDFX/
-e1r8RmovGT0SLEjIfNfrHHJlpRRlvUeQP0pVIH0pEootROZeA/ujofc8NMfEdrTw
-dcufxITlpPvKk8+o6zgaQ0dDWZ9zIPiWMrWwfyEZyNJoTsp1gyLQbp47bViBfJsA
-td1nX4Ep1WRgIq3SQ2l3bh03YwKBgQDGPHP2chMao6ln/T/wp1Aa8bD5aHjSCa6q
-TzsRQeUNVIl14oFE8G+vtgr04el2qztWB0MoXyioqaH09NOny6EFeqZwPo1vQTN5
-Q7HeMDx7TvtOCXOxRCe0F5LR3etsOcNekTInXu9AwOnt4UH52gMVziAuVdTAfXO4
-6GTk2wV3HQKBgCxDO3c3dFfO5RU3a0CX+OTFnfeF47MAZ2y47qjR5DGqYfSrgX+X
-lvyaA0nAKU48AzhlG22LaymnCdAZmiqTzJeihqUIaZ8ZuShC2t9KR19L/wixVhyT
-feNztg3LYNWXWfzgjK2ANsaOKvhwW6WIEHomaB82qP1S4r13yBlIi/M7AoGBAJ1i
-4a6IYyKDTbyCFIG8VJ0PxrI9f69CgKo2vW62ImSy+W/epUNWoVWf8pL5yaGt0S48
-FdW3t1AxXaRdvK07vlvbRMlY4HG0Emn0lQMSyPIdgugyGOhkdCFHlgmJZ6BFPBeY
-r3kSpmGCyDdU4Ey+CjUFsgcXnhI1h+sGlxQbz/sBAoGBAIB/YchF5tOGpls4k8be
-+YD4eB2spHWtJRRbnTYPnGY4xqOzEII14agzvi4Kh7j5O7TE5SZW3m2GjPV7HmFI
-mrU24lspIJjyrZAZXxDT+Y8Ki7yp/ITS38W79DKV/dFnjKG2oUy4ZIs86OkWbNc0
-N1CQppZPRt8/QREmB77yT9ic
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC517rIQh6LnMOv
+uvqbdbioqzlba1DnujkhlU/e5YQ+s1blmffeNA2aB5SIPYxePkmOBkG4kWgpsGMv
+2frmyfSlVVk+vctUf0bqCuvaMx1PLOoZh65u+wTU0+By932uhba4oMIv9YqNEjYZ
+UW5VV5bq79dM1zLlq0D+Ob2P4tO8r/M2VqXhTeuy1rVCxEpyXsFKzIHCzWft90wD
+051ghIGysbgy0Hy8cwi10hAAh+urwoG0b4X7uuxOhQ1WGuf3i89SqrJSYbbf/m6U
+JHDeAjjBwRU9BVUW3QUtY2b/cbJJ0JrKK7UTUE/56eRLCSiruCgX+tBtUPUPxB6X
+Ko6TSbYtAgMBAAECggEBAIzmWt7qAm6ndFYP1WF4Z4C6EMqXGsgWEKq9ocjFCvbe
+0ctSaPM3U/isNpj00S/C2dGsPLfKxsaUzR7Cjc/c3ndbtkYzb62osgNQNHFCv73/
+t52TmVTbTFZwLYP80HU5O8fPWmsyJtG+NPYNHzHlLq8BGKNRpW0r4iLddDNbLl2n
+XezuETlqKO/gcOSaD0zGU2MH+IOLGBgN0GZnOcM6njEOSgvou2/iD6J8DHa+79Cp
+fhheGsD6C7fMhU1nl/IfPCXsKw+s9f+b653k5xSRmxCtjz4zjzXe5SfLXi1V3Xn7
+sbFe0A8NUE2urk+zPoJvnT38/TPuJYV48Rr+vhkQ+qECgYEA5i9JMCEGVZM1xRcq
+snl7MuGq+2EnVCRaQvGr5kc65VUrRTt6Nwb2YFn+/RyRGNoW02dehmeOz/uPxQBq
+FzQUYSr0c61eO2Zuxtoz4z1KdgnMBRkdu8ueHs552N85xsbuHDGW45GCR32l6rfp
+JjYNcN0tcRm0z3f0Y55JFoVUGQMCgYEAzq9c76yaAjnLY04f8QxwJbZFsgMpC/s1
+XzFw3/k9PQLTA8A3HY9qRvrx821n7lAeRWTJLpI12tyUxE5euXeMdcD0rwYplDkL
+3gABl24O79bzCzdNtQaWbftpTDdHfE7c1C2UMVkYhBSRe97fzWzQmJOSlIiD/HCK
+FKP5XorFFQ8CgYEAnK7fQLWaHDICTdBBLg9m/vGBc29kV/AOyLa8bhlaS7S3qX6c
+6EwC9P8NhLknQyVgmDIqs45WNdhkupJXpMe8f4+/qeX+2KwXB6CL5UhZIRP2HhpE
+lQo67Xlak3cPWvEaL3LJ5MmtRoCOqcDaITp117eWMQBwhTB/2DdsRLoozoUCgYBG
+n9GH7FirzgfbpQRxH6jqmf/ytfW+rFYucvx46/eq814beADdTvYnzvQCFC5hVmsh
+lzDPgxJ3+Mu00Hq5sUoDqq1xaQ+oIF4Xu7TPg8IC3lYqTPl2lAuvBSuT6Ye5yFDZ
+B8INor4n08dqplufHEGjATojGwrOpb3pE6bQUWbKrQKBgAcDXQi+OPuT0ivy8AFT
+CDjXfW51Ijm66/QTuajbqDI5sQMp4/LN+7H7uFuPdblxVyEG5kCFYDw6a92DL1Rl
+MgJ5jptMMt42O335DwRcafxnHDrrXbsMlEjsjnpie0LbEztNXpgEduCGItvTCU9y
+h5RULLCWiLZ4PsOvg6mIkTD2
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1
index 3af12b7a100b1..f25685bacf51d 100644
--- a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1
@@ -1 +1 @@
-EC814337A356D99D55DF854FF71CF72C1CCA102D
\ No newline at end of file
+6226C8F81AD07D4668FC4203A97AF390FF87D25F
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256
index 10f7d3cef53d1..75d0129fe43b2 100644
--- a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256
@@ -1 +1 @@
-A74CF5D200100E22D6DA8AC683657C387F2604B152402552415FF156FF04C8AE
\ No newline at end of file
+220121B04479EC8277B1F1DC865584C44C8607C0D8CC37A100C9D9E7A3865767
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem
index c2dbff6bbfba9..7d5104edd81be 100644
--- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem
+++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem
@@ -3,53 +3,53 @@
#
# Client certificate file for tenant migration recipient without the required privileges.
-----BEGIN CERTIFICATE-----
-MIID3DCCAsSgAwIBAgIEOPFUxzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
+MIID3DCCAsSgAwIBAgIEFHYlLDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBvMQswCQYD
+IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBvMQswCQYD
VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f
-cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxQcS9Du5
-cu+WrloqzHqOq8aO71x5fco09rDW12XEt7BUaqadNqBMbyO1JO3KsYHuG3Z1bGZB
-6MPXKwoZb7QVGMjKkyb1inPYLC+1guE8skolKRE8mzDoYclzZ8nlNLxP85axOHTE
-ON8rOZzViX1wevW/4Wk5YykRl1q4PcI9sS0ApKGyUsD1aYuV8/5HT1flF4quQ8GH
-MgaI3CuaI/JpyPJhHXv07McQ40c/3B87bvteJ2zB+PUCnYClza4xr461zK5TmeER
-nSu4AUgH2Pp/YYpWF6PqHiiZmvWot1DFAjobFoNDhSpG6DlAxuZYGvreRfhZvyNw
-+rcvl8VET0eQXwIDAQABo3sweTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
-HSUEDDAKBggrBgEFBQcDAjAdBgNVHQ4EFgQU60uDTycUKt17FwGh7+ngdgXw5DAw
+cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtycgACfm
+cEqAUBVftYHO+uSfZPlr3T03pSGVnekRBKvTsE0Uq1oprtc2Fw0OU/YLMzt8zw6v
+syS4vgiXUcida7+yTp4zDegSzt3EGTcPompHjSzSK0Ex2s51GQMDOMoqG1ZE5Ze7
+RG9wrp2o+B/41svqddPipNr8J6vJLjDEDJVxSREAbbQhUDFWPO1QL8hdphhDvCwc
+OOCXqVLAGZ+QTHahh8//ppMF8ih6J3zKQQ3t1Hl2LwqsdIIRJLuUstwG5PP9XyGu
+hCiob0DBN06zFPU2MP2MTh4rFW+9kk2OyYc9KOZV08iuSPwa7QDL1Ob2vZ5BPhFD
+1cUBhLPQ7wrEsQIDAQABo3sweTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV
+HSUEDDAKBggrBgEFBQcDAjAdBgNVHQ4EFgQUmHFFVgfTM67SWC9P09ppm5aKWvEw
KwYLKwYBBAGCjikCAQEEHDEaMBgMD3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJ
-KoZIhvcNAQELBQADggEBAMD5QD/cX2nL+wmH1Q3NmGPynKuNODwOUMKxvf2ST+Vw
-WR5A6UFyjIBvQeUQOkN3Tt2XSMLGJL0r/T+HSxwNR0yYy8pMUXuZei41F3Z6LZcN
-ckr/ZEoighkmc7U4GvLZYdGNgeKA1v/FCsaiy4hWwOP+50XKVMKCXnW/yNGjK/u8
-0Q016rm92fYv19wZJlNgNElzHVcefAwHRxjZBSGVbmDvG4YGelZwBPfwIPKgMYKj
-M5gwmhd18ileIoC2C/TjKw2kaKsQY79gn7JUVvIhMumeaZXsk7HDiqBeNrPzbxIG
-/7KEFMPbsM1ydbHSwfdfeU2wDL3upUcZAHkPNwgA7go=
+KoZIhvcNAQELBQADggEBAD7vEz/KGq6ky/gMQGHtACd8oNUrotpXbw1CfZj6Tn7x
+Evy46a9MVhUlPL59RqsIKmG7ptZXonEqDIiJJrzZ53XD0nMrCmm99c5WV6rtDa19
+5CsHeZfxjmZJIPHPt0s2tNOusQlcxPViOkWoMf04K7u0OLli+5M6ftyFZuMgS+GQ
+8MX1n678DgM1kgTj2uyKOm7LUKKqLtp+y5CkdwhqVlKptUicdRUEmW7dekAeUA56
+sDma2XSt4GImSB9BbGypCM5+md+E3e0bI7LNCNx3bIt1BQW2jMVyYuCps9b/SlKP
+vt84e9JwCSXu5jZI83Lo4aTNxS97BeCkALQ5uDBncAI=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFBxL0O7ly75au
-WirMeo6rxo7vXHl9yjT2sNbXZcS3sFRqpp02oExvI7Uk7cqxge4bdnVsZkHow9cr
-ChlvtBUYyMqTJvWKc9gsL7WC4TyySiUpETybMOhhyXNnyeU0vE/zlrE4dMQ43ys5
-nNWJfXB69b/haTljKRGXWrg9wj2xLQCkobJSwPVpi5Xz/kdPV+UXiq5DwYcyBojc
-K5oj8mnI8mEde/TsxxDjRz/cHztu+14nbMH49QKdgKXNrjGvjrXMrlOZ4RGdK7gB
-SAfY+n9hilYXo+oeKJma9ai3UMUCOhsWg0OFKkboOUDG5lga+t5F+Fm/I3D6ty+X
-xURPR5BfAgMBAAECggEBAJYibNQEyqyFWwmilahY32yPkg1dJwquUauFV0CtQLhE
-Oh9GtNeSUIwD5p2sQV7/xgPRQWsHhMOsr8IRIQ7YZ5cKMirtbf9BhQEunZ6MqWam
-Tyi7BLaxDvKswi7dTrXfpQDV3FdLytYXYGW3V5Q8LMDVkJBgUSV5fvkZXPlakQ33
-BqsYTTWTwR0GE9tsr0mkJX1sL61K/aargX4Qw3iL2OdLQorCsXyptguzZZ5gKtco
-D009bWXorBiTy7AuzFsXMpPxIvV8j+BZxPgAYTFpXS3dE8iEEbnEp0WeyrpgGVvf
-IQMZf+R5XKhRJnXYq2yFPnYo2812gBRP+lqJEVJahwkCgYEA/+sAvRy8HjrxJYtv
-zoRLDdTrS2OjfiURIb8EbI0qpJZsyP/4CRAInGS80MG3dnZrWiTuhHUpAsxB7MwL
-EU5A583deX/KtQd6DG5kaNP9tmATm4N+8MlzZ/Cc6+noPOj/7UwTrzh02U/4nC6v
-9FFIvXWfn3zbSflmVXvCBR2gr/0CgYEAxRc9SqqR0bqVvSqXvIjx5DdsY8Z0/gaZ
-6zg6hZWtNCjWZ8JdO3Nfb7Gq8WyHbuT0hMZKspoUkQcJDKgevPuLgYa9azkT+JEc
-64kE8inL1OCAKwZwrV7ytscHFwIaGz9w5FiDMpAHzmvLIo1BygkAapqtnlhFnLHT
-7Ui7YfPyqosCgYEAht1oaUDAXkn/lSKTTEjpaKOhT/x1R1/vVFJe2XnXVB81uwMx
-ykzZzNlFq9m6fkJPtpSp4cIAV8oen9SzrG4JxF737TSMNbR3/B8c6SV2meqtypGU
-jv4KxGbHu4dr7NV5Maua4AcnhPIg+OWdsmo3pChdc2YQBj7ZUAlFKP8BFF0CgYBL
-XQuO1ZJUxCgl4XeMHEGCpr7HmEd8K0IH88RM+GL2ovb5ThUgeolvyFw5XOqcuOfX
-LYu6p7hdjHclY9P9J6K8sK0Vpwm/7p1EY2yEvP64M/BOg3Pql3fv9EL9xUv+R0//
-wOjKGNEjWYyfdaxGeRKzsNnaxp43wLfs7/bXCdUs6QKBgAoH7eGj/0t5tHE9xS3X
-Nc+PAnEIUdSO7Y7Pz0weTiM4zcPXLnbuULPQuqzqSVYG4bgPh5pckRXkZkn5/8PS
-KkwYxCalzZSskSI8R5MsVjvhGZ6Ntw+zuTF991BqQ0PgTWwM8e5XfzWyiQ3Ao42a
-d1VZUQaPZimyoO+OdvKCDSzN
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3JyAAJ+ZwSoBQ
+FV+1gc765J9k+WvdPTelIZWd6REEq9OwTRSrWimu1zYXDQ5T9gszO3zPDq+zJLi+
+CJdRyJ1rv7JOnjMN6BLO3cQZNw+iakeNLNIrQTHaznUZAwM4yiobVkTll7tEb3Cu
+naj4H/jWy+p10+Kk2vwnq8kuMMQMlXFJEQBttCFQMVY87VAvyF2mGEO8LBw44Jep
+UsAZn5BMdqGHz/+mkwXyKHonfMpBDe3UeXYvCqx0ghEku5Sy3Abk8/1fIa6EKKhv
+QME3TrMU9TYw/YxOHisVb72STY7Jhz0o5lXTyK5I/BrtAMvU5va9nkE+EUPVxQGE
+s9DvCsSxAgMBAAECggEAK348CC1xeJsIi3v35Xd0+nmOLIFajwg5jZpDGGO/adeQ
+Bp7XWLWpjl6G9sRf8apNEJaA1f7L1IPU5zPNE1vLnknAgxDoWNYSmVNqfP5pVQ1d
+/nV1V1Y9C5PZlN31RugjRaif5dG5Y8/+90hzVrDo+8Ei5WYyvSlPlfAibzifZck0
+V9ULQQ20O3UTW/8Uss+W8gzxMkTm15q0BwT/41mRyIupY4o/GNjaoDUDDOHkkDox
+dWJY9mXc/zxd9rtUfyR8nS5C/OZ3yE7vuubT3Vnx4Fdwh7zE5pbvid6k6UPlE8bD
+JtvBnbF13HbUMhAmF9pnWBKlFMGI8UZHPUj4PzVc0QKBgQDhcK/ZdnVGuwJ7i48e
+zg6scv7JsVFQf4atXVa1Fmq2CHk8ew8rBKPW1FmIiqti96TInuDkaxG5Ei7N+RHZ
+9d2/pR7Bp4W5vik39ymINCO4N4nWvLsHrq432+yfjmKcD00HTOxAUxVC+EDTzAYy
+qrhGOivjghAbHtAJ99bEUW2p9QKBgQDP+vdIhU1XSDID+rU/WVTctr6wa5MkFMik
+LZowFVvicGELpQvtb7+WqeE/Yu8WSeAUKI4Nd91TbgPCgLNuRjae/sczA00euQYi
+MI9jctQj94kht5TvUsUbGdPvpXPqZKDF1FzFlbSPwHGTAtaSYzyaM05bLZwIrLST
+euW+AG5OTQKBgE6YOrZV1g0SpYrs3Lignf0BGlK2vuKRkyJdqBz587oCukGbpW4Z
+8AS1g4FW1ulp7MkEmuMHcOZUsHyemNqkHOrzZgWdocFfyn74bEJP6yQOOL/kjE/h
+VlujEJuPyFEgBHrHFpZWYNM4OWGdf4uqeRQCs8pdTcAmveC7xuQqf1EBAoGASOuT
+PAgQ3+NeNer3FWj6yhAAt9Zf1qy61GVwxB8ZAkGopO6PSgZ+RBL3+MN1VHk+aZL2
+i15VaWUPSGjbgHR5vjFSIl6r8XOp5N7lx1aBbsLhf8LgIzEADsp1dnaqN6pkfDhC
+9rvQfP5aU9MQ7G6C1wXaGTnalWgIQBU86+s2wZkCgYEAu3yXsCDpXQHoZsYFCR8g
+UDvYLPuR9KSSZ5hKbyFjUNilnuI8tImxdRL4vcreM9gWqlWNacJGZmeXLwLIXpE6
+iZK+PwT+ncX2L34y8+OyvIYzsNGNaXLAdcCSoa4Kk3+tQikyx8189sdeF2mAHfng
+OSRkOmDc2t+1VEKJWQco8Ow=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1
index 3e44d403fc1ea..862df3e59bfce 100644
--- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1
+++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1
@@ -1 +1 @@
-4DDA0775EAC5EE548618B1595E6D67EF7F13E2F1
\ No newline at end of file
+CB1C9F72C1CA410D989BD8E3BCEC2CA9D3BCAAD5
\ No newline at end of file
diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256
index b045f141e8474..5c3dd58cee87d 100644
--- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256
+++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256
@@ -1 +1 @@
-0909312EFC6E36BD1A03D9FA6CB6129E18B9FB80FA2EB8ADB57B5075162509AB
\ No newline at end of file
+F97E83D586E2BA3ABFC633E94F2F7083BC05752C01C56C7DDF036ED1F8116949
\ No newline at end of file
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index f08ab644b987c..6b6aaab7ff65f 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -132,8 +132,8 @@ var startParallelOps = function(mongo, proc, args, context) {
var args = stored.args;
eval("args = " + args);
- result = undefined;
- err = undefined;
+ let result = undefined;
+ let err = undefined;
try {
result = operation.apply(null, args);
@@ -186,7 +186,7 @@ var startParallelOps = function(mongo, proc, args, context) {
rawJoin(options);
- result = getResult(mongo, procName);
+ let result = getResult(mongo, procName);
assert.neq(result, null);
@@ -232,7 +232,7 @@ var RandomFunctionContext = function(context) {
Random.randShardKeyValue = function(shardKey) {
var keyValue = {};
- for (field in shardKey) {
+ for (let field in shardKey) {
keyValue[field] = Random.randInt(1, 100);
}
@@ -241,7 +241,7 @@ var RandomFunctionContext = function(context) {
Random.randCluster = function() {
var numShards = 2; // Random.randInt( 1, 10 )
- var rs = false; // Random.randBool()
+ const rs = false; // Random.randBool()
var st = new ShardingTest({shards: numShards, mongos: 4, other: {rs: rs}});
return st;
diff --git a/jstests/libs/trace_missing_docs.js b/jstests/libs/trace_missing_docs.js
index a23cd859509c6..0ea5f07b867e6 100644
--- a/jstests/libs/trace_missing_docs.js
+++ b/jstests/libs/trace_missing_docs.js
@@ -80,4 +80,4 @@ function traceMissingDoc(coll, doc, mongos) {
}
return allOps;
-}
\ No newline at end of file
+}
diff --git a/jstests/libs/transactions_util.js b/jstests/libs/transactions_util.js
index c1531ec514e78..868bd59025b3b 100644
--- a/jstests/libs/transactions_util.js
+++ b/jstests/libs/transactions_util.js
@@ -17,6 +17,7 @@ var TransactionsUtil = (function() {
'getMore',
'insert',
'update',
+ 'bulkWrite',
]);
const kCmdsThatWrite = new Set([
@@ -25,6 +26,7 @@ var TransactionsUtil = (function() {
'findAndModify',
'findandmodify',
'delete',
+ 'bulkWrite',
]);
// Indicates an aggregation command with a pipeline that cannot run in a transaction but can
@@ -45,14 +47,34 @@ var TransactionsUtil = (function() {
return false;
}
- if (dbName === 'local' || dbName === 'config' || dbName === 'admin') {
- return false;
- }
+ // bulkWrite always operates on the admin DB so cannot check the dbName directly.
+ // Operating namespaces are also contained within a 'nsInfo' array in the command.
+ if (cmdName === 'bulkWrite') {
+ // 'nsInfo' does not exist in command.
+ if (!cmdObj['nsInfo']) {
+ return false;
+ }
- if (kCmdsThatWrite.has(cmdName)) {
- if (cmdObj[cmdName].startsWith('system.')) {
+ // Loop through 'nsInfo'.
+ for (const ns of cmdObj['nsInfo']) {
+ if (!ns['ns']) {
+ return false;
+ }
+ var db = ns['ns'].split('.', 1)[0];
+ if (db === 'local' || db === 'config' || db === 'system') {
+ return false;
+ }
+ }
+ } else {
+ if (dbName === 'local' || dbName === 'config' || dbName === 'admin') {
return false;
}
+
+ if (kCmdsThatWrite.has(cmdName)) {
+ if (cmdObj[cmdName].startsWith('system.')) {
+ return false;
+ }
+ }
}
if (cmdObj.lsid === undefined) {
diff --git a/jstests/libs/trusted-ca.pem b/jstests/libs/trusted-ca.pem
index 4190341ac516a..e47011e5963db 100644
--- a/jstests/libs/trusted-ca.pem
+++ b/jstests/libs/trusted-ca.pem
@@ -3,52 +3,52 @@
#
# CA for alternate client/server certificate chain.
-----BEGIN CERTIFICATE-----
-MIIDojCCAoqgAwIBAgIEclbQATANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
+MIIDojCCAoqgAwIBAgIEc+efUTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl
-ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha
+ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDdaFw0yNTA5MTAxNDI4NDda
MHwxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3
IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMR8w
HQYDVQQDDBZUcnVzdGVkIEtlcm5lbCBUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEF
-AAOCAQ8AMIIBCgKCAQEA8h+5axgTodw8KmHz/rcPcy2N/etFkipOVL0i3Ug6JcKk
-DjuSIdyLULuIQlR8nXWQ3hW9CZ2gDCeSnmnUKY6GWDQPHoSUJPhmGkXPuPBXivcL
-QpLVZeOHrqR4+SHzOA3317LF/QYm9kC3dEZIz+dWUlTHs4NFwR+Yo84XNosSGaUh
-o0mK5YcBx0W7y82rNrijcygOkXF9QrANUZfUz5uQ/ZPDjgoISqFvgMzJtpL6LqSC
-TbsUM4NbPSYECDFzIosO+rhYCUsgZ5pE6NWZjmKzq4+zeb/2iSIoEb7U/5f6i4H4
-880y+usrcsBuNCS1OVHaEB1ZrlinJbzplB3nV9Hj1wIDAQABoywwKjAMBgNVHRME
+AAOCAQ8AMIIBCgKCAQEAn4/NB8z28VxeJ2Opsvm83sjk4dZGkok1Z9QlKS9VcTZU
+sfYN2nrCUEq0mMGg7mFsbSBgZq0a1IoRYP0Ci1ycaqqg0iLGlvNAsBhazVgnlr6O
+P1j+hkf5JGM7r+ZgVF/0u7i9EFAgVs8EwqCH/RE5p0oJ5ncGiNf92KB/uG0r/eWz
+TF3/VGuudWcOaCzs8MMMWY4iYDpm5QWUnS7eu/VWW1efGH6ZEEo63bnAFsQZu6xZ
+yKOKealhiDLRVatigFqZh6oLQoEckl4+QzWKWxscAHuMuTy+fWYLdhtrGZIBEutO
+DmzUMupifSy70VMt9nPcD3/Z93agswMJuU5hktpvUQIDAQABoywwKjAMBgNVHRME
BTADAQH/MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF
-AAOCAQEArn+KmfD2JEXa0G81jY+v1+XBT4BCcFExbdpOYbIoo2m0Qvx+sla5+Qu7
-nG51R+3rnkVPr03ogKYtf3hYtQJk6DqfuF0V9ESYkz09XRwyW93mh3z4yumXnk3y
-d6SG2quC6iJV0EqT/OnmmveGBpxaBjf80ezRq+8t0mVGeNwZSxv0OprAkmKIIDM8
-Qa1/LlGhStiU+hN62c3m4wHdY5jreRYH7NyIZCHJ/wKgo0cDWWdJ4MeAaQhuijUI
-BaNg6mFHlxVMMRGIGSduUhu7vHzjbAES6kJxdIpDM8tZMlRZQ3ORml5s9onSMb2n
-NmJkjwyB62odD+yrygWRLtFMJmKODQ==
+AAOCAQEAfDOw6TjvfP6w137p3z+FncTYQM8a+Ytgtniy4VvJjLXyev4ibzGyBiBk
+Pj6Y5AcCVRyxzUgPnL3kNOTOPI2HMRLu6WR3vzzvJPZQcetTt91A9rGr6C/I08gS
+AlPaWFsiMmJML/QxH/C5Jh1wvoRha69U0IlXITGHiGBvmYtvjUXD12S6W95zlbSO
+g9zKc/MBZxe+bjaR5e4l+ieMI5QvBf3ehTg8g0kV7CEA0ZCmbuHL/yLkIz+Yvf7l
+QK4NXwZCOq+ERpugG0cGh1zwk5K7N3MsBvA5NhyPQnN/4WHZ3c0Lqznf6m4h7QyW
+U0F1wL+qogbpLVQ/oZOdnjUm9JzlIA==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDyH7lrGBOh3Dwq
-YfP+tw9zLY3960WSKk5UvSLdSDolwqQOO5Ih3ItQu4hCVHyddZDeFb0JnaAMJ5Ke
-adQpjoZYNA8ehJQk+GYaRc+48FeK9wtCktVl44eupHj5IfM4DffXssX9Bib2QLd0
-RkjP51ZSVMezg0XBH5ijzhc2ixIZpSGjSYrlhwHHRbvLzas2uKNzKA6RcX1CsA1R
-l9TPm5D9k8OOCghKoW+AzMm2kvoupIJNuxQzg1s9JgQIMXMiiw76uFgJSyBnmkTo
-1ZmOYrOrj7N5v/aJIigRvtT/l/qLgfjzzTL66ytywG40JLU5UdoQHVmuWKclvOmU
-HedX0ePXAgMBAAECggEAP28UUvyB2Dws/yWemTM4twJOWyISnhK7ZvQOeE79/Pqb
-pKbyyDBRx5r6PfaIl3A/vg+P8T78uXJ8tUggr6qJg/5Pn704Wt9BUMzNeTRumxfS
-OWTix8juuVCZ6Rt970epkTdjK8E63/VgmvP3C5EeSn+vulZAJjKy+Od7qWySF2lr
-ikpuEvzy+l81NWu/YbBgD1JlNdn9IPr/hmBeOwUyOII9uiDzSvvUQb9Q2wFbW07O
-DjPG1SxCYr54vGL3REmyOzhLPp+5OiHZtF7w76vyq/pTCz7eUfPwTwIo3DAybvPx
-vfIeBLfTIpdTL9XyCPhoIuwj+6AA6MfRKATi/0OLsQKBgQD7uVgvlDE2+a8xjfFT
-Z676Vm6hiMJ3lrUysRvJJkTAiL6fzqS42gOu3dTk6UakuAxiGpkBOtKG6/j4HYbL
-AUGhpfyLwyKfdVn9OyAZFxW786MKtdYt2iKZ+c5EEb2MFndQGBa0ErtaG0nPHlTn
-vm9Kf3bR0n2WXdb0pzLlL2NsLQKBgQD2PKHxK0zRKiy3js5nxUK1feYtTlqWsvxO
-wM94qSOQ4r19QpdbIU8/ywWw8O3j5p+etWqy6KlSShDvP1QYlx5zEo5vhZl96Cfz
-Ixk6eHrxsi8r1ARcCgDc+05ZqyGhjPOCZI5y4JXNbdnVhKnfzioFgs9mUL+IwzxQ
-l8hnGDOekwKBgQC8nwGphU3rd+UDKO1wJf1DsIhtmmC947wbJANCEt7pto3AicN8
-kEC6Q5dHgFVjEFaXHH8SINWoLCH/KYDblTFsw0geIjZLbk/kJO3EXzv1/nZpjB/V
-c+MBpeIdt31k/2CgL6yzemXQ+ymvfXb5wAT8uc57I1Lf3ak44iCNA5locQKBgBoz
-7Y+6adxAPq3x1mkYhrbFhP8BPYaX2V2QCwPuC8jZOAziTzT1YHeLZTmlCcucuROs
-foQ3Wf1VwGOVO5+6RRU9vkaD0weg8s9exsqAE6UwrvBdCXG55smdPIMyQMvuMeOI
-S9hRqd9Id0vPaDxWtgYMG4HpydgF3p8856iA3M1dAoGBAN1s4DE+5KsnrM888dp5
-z38+QEYyuO3DMuSE/72lnb+7rfTg+75tfdLdKhJrACHlGC1LvnfJ5oLUVWwMgqSZ
-BVBDkykYxd82ATRjfLj3S75nIj70vgb6vGUpm9CommhvRuUIwi9GAp+LzI5f9w1n
-1i9azD5QQ+vNVmfXX61/0ehq
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCfj80HzPbxXF4n
+Y6my+bzeyOTh1kaSiTVn1CUpL1VxNlSx9g3aesJQSrSYwaDuYWxtIGBmrRrUihFg
+/QKLXJxqqqDSIsaW80CwGFrNWCeWvo4/WP6GR/kkYzuv5mBUX/S7uL0QUCBWzwTC
+oIf9ETmnSgnmdwaI1/3YoH+4bSv95bNMXf9Ua651Zw5oLOzwwwxZjiJgOmblBZSd
+Lt679VZbV58YfpkQSjrducAWxBm7rFnIo4p5qWGIMtFVq2KAWpmHqgtCgRySXj5D
+NYpbGxwAe4y5PL59Zgt2G2sZkgES604ObNQy6mJ9LLvRUy32c9wPf9n3dqCzAwm5
+TmGS2m9RAgMBAAECggEAN0loTW+jkPXkWdmajz0hSEBrriTExzlrm9JSHfccr+GX
+byJGopEuiwuXj+ZnkLGJFDbHsEwCo+pDI0wHeapZOExu9OC/1sXqgBmwPL5y2TSl
+rSV066o8lXMc11IHxrSI5BKaz/AzhGCu0ZE2DWXOiDyLOaO8S+YrOshB+PE68Pn9
+/qWZWSWfOz1nsWYX8ywu+R3o3Kw2RPfn/9lruxjJCVntuCvZcWmaeiBvxMdl6jeC
+LvlT6+4tkE/lEA/sgRMow5hD2aysSnsIkcorwwZZiCkMDVavNXlwZJxIaHH8nnad
+2Pxaf0PP/BXfTIKHinuzprVwQ9WeKukbJv/pjNw4AQKBgQDSUBE1L7dAynXFLA2M
+cYucpuWTMqjGt8uRQ9ceDXE7Z1SGiawIPCgmHofoB0mgddRoRxWm5iNupPRRXvpw
+hBT4I0LXbMjCuJIO7TSeRJsWSSUqEaZL0dFx/5GmV3NACkTR/4NRikILt2o3L3pP
+9Yv7efRcRHUgZLW3cb6P1e8CIQKBgQDCOVy1O9mJZBeg0kQonGQZu814GxFvR/zx
+z2Islh0+axKS3/l2v/yb5bepjfomdisjQxqvhfim1KmJv+P8TP6WFmxwjg85tUdk
+oMi3oTszxjCA+eYfj0yQGv0+UvseGm0TfTuiAO0SL3CWu7XrKN4ob+yLnnd9gQqP
+BTGQ7cgnMQKBgQCQNYq3F5LmgG0k2EIqDSmYLvC4cEI+kISrhQMafkkoXIAfCIPH
+2cgF62Vxep3Hw0P0hNmZ6bBeDAnjSeccA9WFGFia8uLucjTku04bQBu/ukQbhqKq
+1qJxMrciglBqlx/9huD6pn0HH6tbT9jkvxBPTZ57Lg3KOoRH11y9sAoFQQKBgC9O
+v4kZYdw4OBUhAh8OMMef2eVGWpHLbA4OIHCRw9+/Ps/tpBrLmqDybDDtdx/FKq61
+GpvkOvOP2xfFWKfMrTorjhBAWe8Je7FEBH/N0tjCjm/r7qSDR/fVyxdSKP5lG2pi
+15KXPSdvzLG6WQ5Fbw9Ua756Q8qbEtJRRohxko4RAoGAfzCC1wa5zzmeY2/B0vqs
+ul1AKBD7h8XbgKmRrEn9OAQShAxdwQKQa0N9DQh/N2K4s3pfrUMY9kLS6wxBfD0T
+xt5K6vlqxnDw4swbB86SbP1gvmU/nL0dnLfqZJsAsNzMqRxhWjc5ysm0IXKV1O1q
+Bm8Kwf1jENRP5Lie83AW64M=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/trusted-ca.pem.digest.sha1 b/jstests/libs/trusted-ca.pem.digest.sha1
index 44694a1db8f31..8fa1c7ff42115 100644
--- a/jstests/libs/trusted-ca.pem.digest.sha1
+++ b/jstests/libs/trusted-ca.pem.digest.sha1
@@ -1 +1 @@
-199E4C0E9DBAC3EDDA339125CB8ABBFF8DDF8442
\ No newline at end of file
+53A3A10554962C1188FED99A346C0BD4F43B3A27
\ No newline at end of file
diff --git a/jstests/libs/trusted-ca.pem.digest.sha256 b/jstests/libs/trusted-ca.pem.digest.sha256
index d01ec86644853..04db01b02639e 100644
--- a/jstests/libs/trusted-ca.pem.digest.sha256
+++ b/jstests/libs/trusted-ca.pem.digest.sha256
@@ -1 +1 @@
-2A513BF013D0031D93537E82077983549D3A462EF43C9AA32D15BC6A3D13EEED
\ No newline at end of file
+415233491DF74176550B70F1D87272991A4F09E560A06315F6845E55631CB50F
\ No newline at end of file
diff --git a/jstests/libs/trusted-client.pem b/jstests/libs/trusted-client.pem
index 4589a236f233f..2535fdf114cc8 100644
--- a/jstests/libs/trusted-client.pem
+++ b/jstests/libs/trusted-client.pem
@@ -3,52 +3,52 @@
#
# Client certificate for trusted chain.
-----BEGIN CERTIFICATE-----
-MIIDrjCCApagAwIBAgIEL4rPNzANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
+MIIDrjCCApagAwIBAgIEEsAWmjANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl
-ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha
+ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDdaFw0yNTA5MTAxNDI4NDda
MIGAMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5l
dyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEj
MCEGA1UEAwwaVHJ1c3RlZCBLZXJuZWwgVGVzdCBDbGllbnQwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQC/O+/T5vzk4N45xzpTy7wJCA0Vqrg4rjMW38Ne
-L03rZI937BN1LFhvSVDp3aKSdA8dgA1oMZxFbNhYw1C73PUKP3GhP+u+Fd4aUwUy
-mkSGYPW4XIESDcc9hVRtWDM5PH2GtMxuT9xOLJqL4FIjzBjjuQpI5VltF9gdgheX
-kpCUhk3A3XWqoyQ7exnq8rxfcvgmto+NUtuIjaHxbuDIzm+tCwS4LFgYVuCEc3G3
-MwIFW7VNJyMYdNhejBfAu0i0w91VhvWg2s2fEG0Xcel5TsP8a3dYGKaCVZ3C2MsV
-fQf8/cI6X6nn+vcfFvtAQhGAGfD/ry9rOuXELElb3cezO2Q3AgMBAAGjMzAxMBMG
+DQEBAQUAA4IBDwAwggEKAoIBAQC3N+lL+aVjQmmDjnVLaO5c6SFs/kT17q3cGx2r
+VnhkF/nXUGTn9G9CwL2YR89IFFxM5pzPXGXX/MQzHZI1fief+2qGWK8z/fd4XKar
+jLmyQ2qFUz2zCN4yMlzHJI8Jr+wzyylz6wfxjyPaC9Jd7t5gKAVMUvNNPU9UamDU
+bhxFk11E9ot0/bToAf0sv4wfndt+xA2AWtt4Vm+F44zuptL1C+UXqEXtY+t+j3jZ
+iw+/rbRaKksawgWb9zn4SvmOLVy7SR7IgAAcI/SEPG+dSXlyDD9BHRR6YbwcQPB+
+V+IHo3R41d8mQ2n6FMTdrv342G0CDuR4sH8c3fmbP1mqNqmdAgMBAAGjMzAxMBMG
A1UdJQQMMAoGCCsGAQUFBwMCMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAN
-BgkqhkiG9w0BAQsFAAOCAQEAKtc2fA9kFJb8/bxJidpQjUAqiRgFOW+VYA2DTpQJ
-5+2mgEFutClVGjEdcmHseO14a5l6W2KSbJAYpv2ZIhFuBXpPsHk16H7j9SR+RmNp
-NkFhqWbEhdOVm+KNS/4M4wRl0s5RnbXpPvxdvwGIdcplUck+cFWtcSCU1OZYZL3z
-F4nN+pdoydLUx5zzHMkQSoFj8Hwv9IfIXhDFLkOhvGE8BS0r7dR2q2TEzA2cBg0R
-KEqDGzWRAuAZv78Cg7TfIVgTA0DYaZIL+6F3QK8UzFrQLScbfKCXUJudD/P/AHvj
-s/RYLhJGDEOjHov5Egixkd9DPBtq91N3eCzdSyZrIGQ73g==
+BgkqhkiG9w0BAQsFAAOCAQEAQ5OWvZJ/WIdj6/3DTPGa/l1bwPLHtPs0EZVwO82f
+JwIZGk+6/WNVmnejgoqp5cSJUzeJ+9qodoqmIySiQYsPQ29t/6WBhfUkWQ45Tc9D
+uMYfWiJNyIZGZSvimg4LU3XW0Wh+KNOE0WTiNn6+EpP5Bd3mpp7cqKrBsglUiuyh
+ECC6pv9kknBlMWRbtvSxbwSNnuoAoD4ACWiUIFl9cE1ot7q1Yy9tP6PUcd4ma2bk
+t/CmtA89+8+8b8euElCsREeESwHViuH61cHVA5L1MdRGJ5gkITzau1SJuB44s4Oo
+u/VDOqjAgwYx5P0clbareAAFu6zlarFN3b2Udr6T9O/FEQ==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/O+/T5vzk4N45
-xzpTy7wJCA0Vqrg4rjMW38NeL03rZI937BN1LFhvSVDp3aKSdA8dgA1oMZxFbNhY
-w1C73PUKP3GhP+u+Fd4aUwUymkSGYPW4XIESDcc9hVRtWDM5PH2GtMxuT9xOLJqL
-4FIjzBjjuQpI5VltF9gdgheXkpCUhk3A3XWqoyQ7exnq8rxfcvgmto+NUtuIjaHx
-buDIzm+tCwS4LFgYVuCEc3G3MwIFW7VNJyMYdNhejBfAu0i0w91VhvWg2s2fEG0X
-cel5TsP8a3dYGKaCVZ3C2MsVfQf8/cI6X6nn+vcfFvtAQhGAGfD/ry9rOuXELElb
-3cezO2Q3AgMBAAECggEAcIotUV8WZPuQzB/ay4WSWx5J1P5q+7BIkKWOq9ba3DSJ
-2eEsckBuqs9Sts6f5eA3JP0+5pqLhK/RgisvT99wtV8w19xuN6dW1dcVr/npacuV
-z7Fgo+dH4YSdctv9CSn4FVZBGIoW9Ep7iOWycS6jVyCGVO/j+LeXj4YHrEOsojqC
-+YRTv+LbY+aWWcL7SuiATIEqWqFvjqmW5sTtmblUG/TtW/D8XB8UQdwEbfQsZx0j
-uVxDXx4qLGaEOMjE1DQpzTicNQ3z8r4Ij4egfUj71TuByrVCqn4PFcaq74U/1BPA
-C9CMm8qdD79qpLmum/y4SyXgahajQHLIVgnAR9y+AQKBgQDiaGqpTL6Q02TEtV8X
-B2yt4Ep5kiNfNKTqXjpIuOZGJOIJoSWt5UZ0fCcXfza8/cirDtbQ9ZXubRGP8kwD
-uAATCWY+cpjKSbz+tIxa3C9S/mL1LZY9AZRJMpWnNF3hTCCVBhjQnqIDxWzW8Jbr
-AubgCp/2EMaUi5nGXIpU4UwGVwKBgQDYOpw8OP819QET1vEfixUjGz3KExvgecMw
-2vj8U2fHQomrRfpzGY2GhgollR7gONeFgDZSsqh6Cts5Kx2rR68KfMCX3dk88Xub
-U3FszxTCugPlKH90tXrNPJcI+Bz6cuynmt0syR0ZntNVFWNV7Hoj3LLAY7NCdwg7
-QKBZlhwlIQKBgQDQsiMvb5hxcwp98Bl45tUc6ZR8v2Jvjd0+VCExi45ntfPNoIdU
-5fStxwtZs1/Hkb95PjJxZw8POeZoY5YCD4eyBIYEpimEvbfCqLZ/wlq1C9w32A+W
-qHABkOk1uSWYWU3nUDlrg+4fv8n1zsuuUXxzpBeTAB+sKYpTuFWirBIh/QKBgDGf
-QbuQWQFI+LZU3YEfqfokhkmZmQwq5WCQ4BMIEQjpfC6SIKfJdXEp0apOToemg89f
-XRgdaAyZ8TVtb6GfEcyWVJyFjRUvVe6Pd7hAzcLibYJpiNZ/z27KON8WEZBoT2cn
-YxqkDMmUaWtdPS661kzkmSENwFXAe2Mdsa7dhBgBAoGARAs0NF71RQniqTvSy+SB
-GJ3/sjCFrs66IbejqtbiF0aWjpPoIGRLh+lOYzSpeWGX3blhs6YkzYYcsamYVdaE
-ltxC5Cw3eMke2Z8X3jrLyoGJUGwZzrURxcWvWPJsbUJAfu804htLAt35aHdmWGKT
-mVeQMft12zXHLOIbQKCN4Po=
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3N+lL+aVjQmmD
+jnVLaO5c6SFs/kT17q3cGx2rVnhkF/nXUGTn9G9CwL2YR89IFFxM5pzPXGXX/MQz
+HZI1fief+2qGWK8z/fd4XKarjLmyQ2qFUz2zCN4yMlzHJI8Jr+wzyylz6wfxjyPa
+C9Jd7t5gKAVMUvNNPU9UamDUbhxFk11E9ot0/bToAf0sv4wfndt+xA2AWtt4Vm+F
+44zuptL1C+UXqEXtY+t+j3jZiw+/rbRaKksawgWb9zn4SvmOLVy7SR7IgAAcI/SE
+PG+dSXlyDD9BHRR6YbwcQPB+V+IHo3R41d8mQ2n6FMTdrv342G0CDuR4sH8c3fmb
+P1mqNqmdAgMBAAECggEADopIYHgqcOsnVoJqupZUPFlgBrdTH7VWZN5eB2fwW/kv
+IbBGocXbQo/rO/eO8qhy/sNadHZfurlblLbnEIm3eVHJjWniVZz1E78+luSvI/+H
+p5cIkXnFsHQguski9ODaPO4FlyZy1e/HJ5nCkyPO0BC9AuaeWmXgMtHSZ2lpovfB
+dPlL7KN55AZVSvB5enSb12m9G+RTygDWAKRiV3oule1yVnr7+wVrROoZpLGEzy+2
+5iLmrENWwDaGPg+UYK7dbKgYoU71Rfb1un7HTN4grcsJDaJSdho/ESxh3jXdD2Zi
+YJKGyUDxgJwLvgJmYb1adXwKwFAv//43Re9O8Ed3HQKBgQDq8pMqmnxpsHHvM0kT
+d52vIFNGP3GbMjZ7bQqA18nv08HXetcNnEu/hOkvyEvl/setLQw6ZXAYMlRFaKq6
+9gx0z9kP2MWoehno/1sroJuHcAm52cs7h10E80O/QblVw3/wDQ7N/xBwNRENc0t/
+tBiOE+lX2aCjjVgmPG9jFnEIewKBgQDHorrAS/1ay6iZs0jfMAtIX/AWkTgFVvoa
+HyEPDKX0Ae8HqQ65U1bEWRJMeUrm+YbH1fTTw6SRvEP8KVpUI+e0mvwHlSG6Rrjk
+6DuYw3aX/GmltYOOaEcn58xDqmVjf3pOLmdDw3Db3XNVQ/0xLl3pIIZOiSlWwK8o
+kKUZANOWxwKBgEjjsNyKyQZRA0fedVFgzr7CHJOyoyAu19A66ANI7xFEdOLQ7V7/
+mlB0f5OfZEyaWoBTdXO4fKpFEvflnPIb24lx4jmUWt4F43vxnOtQDHBcRegutWpm
+eGlMvzvavBDrcgmbQ3iNJSBqljvCBS49RVDuxHnIvpl/BFX4ceYf1Dl5AoGAMPw0
+GNVMkIfikfxVCDbQveyUi+UMOvsxmVFEGl4/JlWHInkQHNdfNgSpIcytXWmi8cwF
+LCUYb44jnG9FM9ovk7hn2TB1uzB5U+nMHdrwcbDE951Hb50UwKz3CS95e2WfTCnE
+uvLs8XYwQ57QNM4syvRpIEI1u4zCLhD4Ad+bC6cCgYAJ9mxLDp20ZLEI6nqxaPwU
+ddlcFjowbkkjchz8w5aFjkySuDe5AzPPh7HeGvn924ahRPbSXzmSIovc8UKFKQKs
+zcxWlbF5EADBxzFbojqHvm0INEIgDlcUuOXb9XwcGk2Si9fzTWiY9O+3Zh+iRE+M
+VgMoS3t+wWZnahGy5FSDvQ==
-----END PRIVATE KEY-----
diff --git a/jstests/libs/trusted-client.pem.digest.sha1 b/jstests/libs/trusted-client.pem.digest.sha1
index 246c340c1c99b..e5cdff63e2b6b 100644
--- a/jstests/libs/trusted-client.pem.digest.sha1
+++ b/jstests/libs/trusted-client.pem.digest.sha1
@@ -1 +1 @@
-C142DB298EC99AB0277EA16903D001DF297A7F1A
\ No newline at end of file
+0A3CEFF09FFC8F5978B32666F039D8E2C061BB3A
\ No newline at end of file
diff --git a/jstests/libs/trusted-client.pem.digest.sha256 b/jstests/libs/trusted-client.pem.digest.sha256
index 7eb442a40ea56..f79bd8e829d67 100644
--- a/jstests/libs/trusted-client.pem.digest.sha256
+++ b/jstests/libs/trusted-client.pem.digest.sha256
@@ -1 +1 @@
-19998D06B253E27ADE91239F6EC9B94329EC16369B09D6FDFA35FD2685652027
\ No newline at end of file
+330CD2E18004BE2479D58988108A9B7DBC1A4768DBA2A512C956C8FD3DB5F52D
\ No newline at end of file
diff --git a/jstests/libs/trusted-client.pfx b/jstests/libs/trusted-client.pfx
index c1d9bea7f948d..df405f69c2937 100644
Binary files a/jstests/libs/trusted-client.pfx and b/jstests/libs/trusted-client.pfx differ
diff --git a/jstests/libs/trusted-server.pem b/jstests/libs/trusted-server.pem
index 0895bd191b516..d52b3a3e2b94a 100644
--- a/jstests/libs/trusted-server.pem
+++ b/jstests/libs/trusted-server.pem
@@ -3,52 +3,52 @@
#
# Server certificate for trusted chain.
-----BEGIN CERTIFICATE-----
-MIIDrjCCApagAwIBAgIEOQrseTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
+MIIDrjCCApagAwIBAgIEALTCNDANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO
BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl
-ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha
+ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDhaFw0yNTA5MTAxNDI4NDha
MIGAMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5l
dyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEj
MCEGA1UEAwwaVHJ1c3RlZCBLZXJuZWwgVGVzdCBTZXJ2ZXIwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDD4DY+/aqU2WCuHbCJete03bAbdYzQHGDlbfql
-0GcXON5aJEUwCyXbnKZjcxi55vxItF4KTau6ipiVXq+ukt8Maixi8gifAIYUVfft
-Naiitn2JubzfatjKfCVpQI/JnFOhugSw/obJPhAqw02g4Ul6tDvoqa7rC9wRmKXH
-TyRr5xkrRi1dNhWX7s/hvRYwoMX35ipKhW7RepHxG6byLvKlKWCMgDhzIphNRNYp
-DopzzvvL9Of2siab51lzFdfYZ4cMGZt3o+uY6Kmb63glv60Omf21x33hGnU2ucrd
-yYxgF43+8J8RJeVGGzD3sjE2ZTI21ZUQM3/BLT+PnHgB1u2XAgMBAAGjMzAxMBMG
+DQEBAQUAA4IBDwAwggEKAoIBAQCn6mB0JG5+uY2Qv2HhlqvBAJr/caKuedeqWG9I
+SDbtSR+slY09fpmjKxC1UdsgkXJwKfC1Mqcdtj5PgdgGuzcype+7+eEcbd4yUuuw
+kPnMZNYTbGSEH160NWjfOCWMTxRLDrYVRCJTw5LkCD5QA2mNS3SldqxZQ/E/Cjw1
+5BgurjC3HaENcvwgK1vHvm15bt1TF5YYEC9vfo5pMeG+9MXeSLJ5DgBwcMPjKpQ6
+9HBAE5/AzpVqQBTUPECxdfPKxd1652jfkvja8vBKVxkWIK4n8jy/adrju1ci/7lU
+S/ec9hIL4pEk0LuXFBmtkLrSUtYJyrV/4rhwXlmaUjv4FRhZAgMBAAGjMzAxMBMG
A1UdJQQMMAoGCCsGAQUFBwMBMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAN
-BgkqhkiG9w0BAQsFAAOCAQEAtGG7O9KZ6fO6PByz/C9i0O6ClhkKuq85kaH2hCpA
-apJ2eM1LcfmJ9DgiMmvN6QevTnkkJG42MXqKVbd5mglnmGPRFgQtqIkFS4ERKvj1
-3/IjwvPP1k0rCbuJQ6J8tmEmZDIATYWd7qauH0K2KvtH9sjB+9ghTfBZzIx2Y5We
-+k4ETF5p1Kqu7aBmEXOfcldh9iedqkDUNP7fWQKXPDJ8a5oFVTkAU2MZQCczOp2h
-SNdj6jb8qaWoUp4cVs0sswC/nhI4DbNrfQXmRZisMuywRP+gzH5vr9VUsRvHAan6
-orla7WPITYTuWf8qV2grNJwQaN0AhNktXl7U7h3kIDd5fg==
+BgkqhkiG9w0BAQsFAAOCAQEAbBag9QSwjtviJ6LefGx7CvyOtf4z1Lz/Niw6o3lS
+sBE/UPStas65VRS97lCIrQ73Hm0Kmvf7lQ6pkyhC/fcxiWvs9oFLNCtNBREDtWzd
+yywJhwA8tKorFnKGKuguL9zUbEnlh5H8a4dLkNSwPf2+BkJGeAKo0LZUOzBTJkrt
+H1glT/ABIV/ApmHtnX0guhvtGU3XPUDdfNvDgdW1UgxWLzBzKUYDGoyHvUj9VW2i
+U2QaM4NiYnC6lCMFf69w7sDh75NBKn4QoZB5oL/lyt2Y7gMGfQU4pBatQSA+u2Qd
+gzIkpzECim6JnX+SnOYIuKAqYZazoY5sBFx0c+iUT4GdEg==
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDD4DY+/aqU2WCu
-HbCJete03bAbdYzQHGDlbfql0GcXON5aJEUwCyXbnKZjcxi55vxItF4KTau6ipiV
-Xq+ukt8Maixi8gifAIYUVfftNaiitn2JubzfatjKfCVpQI/JnFOhugSw/obJPhAq
-w02g4Ul6tDvoqa7rC9wRmKXHTyRr5xkrRi1dNhWX7s/hvRYwoMX35ipKhW7RepHx
-G6byLvKlKWCMgDhzIphNRNYpDopzzvvL9Of2siab51lzFdfYZ4cMGZt3o+uY6Kmb
-63glv60Omf21x33hGnU2ucrdyYxgF43+8J8RJeVGGzD3sjE2ZTI21ZUQM3/BLT+P
-nHgB1u2XAgMBAAECggEBAKRUSG5UVnYGYOvykJbmbm2YxxRibD50cH5K5EuUKcj7
-tt+dKkaCM8KpC5bHiuj79o/wcizmuLoJaLV3+J/XLDW4JYDlWYI76EDLVvK1X8MS
-owOWxfQSKcrGmIRS287ize8Sprju8JmI7ftSWqAsIX9GEjTBlrQvAPPqQrqRlRbI
-E3pZoWO2EJ/WjhbLUCmet9AIm+hfG6/CnnzvH0CIYDr4/CFaI3dzfD4Oeeoe6gEs
-KE/I/q1ya7JAA9o+wVNL3TpJ293yXSXxX+YcMSgrSpgwadkkDr+8AFplDwfJx1uA
-UTJs5SMeKym+jrQgxIAJFjLEuh3fClAfe+TgC1Pwc3ECgYEA9to/lMS99qwLX6aU
-hFeCeY199O2qtsGqG7Fb2R0LXgxbux9hAIFZkzAOma3ZUwvMJOmwO8K2CVGtGNfB
-uC5V6OSes7u73FsHzg6nVg5f4J+6V69QT3UhDNxmEmykrZ/3y2yrGBJdARuezrA3
-B/BWjvm1x2ZQHNOzliyWzWDgYmUCgYEAyyJgSFGWd60GqEGFfbhh0GvIhjAAnBys
-Rha2Y3rxfwhzP4We9WlBawTbdi+SIeKyoIUVvMILspadyhJCqlmKjVX2E34ejvJr
-bqrZsucWtDu3pLbdpxIgkvadilyPeFuJLfSbevuHWePynjfgXNTLqeXKao+hxbIx
-8uUo2MoDEksCgYEAnkXpJlXPBu3gjP+dkg0Z1x3leLk6D9u0WfUp0tdQhoid+Chy
-ZYSKDlltwxM4mIqj5bcADBEX7nmz5o3P42uyIZUUPGFOXkbvhirXF9I3nypKrBX4
-BDoxarGVsJKAM+KxWnjeapy7jQ5MkHFjl2990EZDSwpKq3EBYHRObHiE5hECgYEA
-sh4wgvlXkRTdM4hQf07AJjt8l31b64eQ39xmjZBuVc+ZDugh3FsxC6A0t8s6vg/E
-RdCsoNkd3LkJHvkoD6t4PDpWDf2W2g1jeqTBQ4gydlOu0fWvIiJZAcTRm1NSUl0h
-SnSkKRCJPJ8OTQH98Ch/dTiTZyEO3wVDmxKp4H4yd0UCgYB9kUwKpSJZmOE36y6w
-mXT1YUK0lR41v6jDEJVBegNChVuXuFk9hEooyCmOVRRGGm96Qu+tAzVZcoya8+LE
-YJJQD3invZW+IJhrwKlgDCqSqaiAfOz+Yw/H/vUaohwjT6okgrC3uzEYnneL1DA/
-7XxoLcbhaN51O9KJ/U0jZ4iEQg==
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn6mB0JG5+uY2Q
+v2HhlqvBAJr/caKuedeqWG9ISDbtSR+slY09fpmjKxC1UdsgkXJwKfC1Mqcdtj5P
+gdgGuzcype+7+eEcbd4yUuuwkPnMZNYTbGSEH160NWjfOCWMTxRLDrYVRCJTw5Lk
+CD5QA2mNS3SldqxZQ/E/Cjw15BgurjC3HaENcvwgK1vHvm15bt1TF5YYEC9vfo5p
+MeG+9MXeSLJ5DgBwcMPjKpQ69HBAE5/AzpVqQBTUPECxdfPKxd1652jfkvja8vBK
+VxkWIK4n8jy/adrju1ci/7lUS/ec9hIL4pEk0LuXFBmtkLrSUtYJyrV/4rhwXlma
+Ujv4FRhZAgMBAAECggEBAKLg24ztZe0w6hw1OKoNcC83+iG8xbP+5m8Ld26i9fy3
+yKytKKM08azv8jYf37G4xEv+ssnhB2/vvoN7DOSCp6lOgrxRR8Y+4KDqBEtyZYyZ
+Z3rR0rLChyfBAyg7m1h9wM6Jx5/bRn0AIbVmNaOBm9p+DfkOo2G3YQToVb1ksvxb
+UfnGBuSL6qXL8ABuL247he1AY3w0yDa0DQcYd0YjAebQjavbN3i1xAdKN+JFBHIS
+4gviMOAk7WoPWf6k86CMXHUexyqvti146AdgvAqUjtlnuzjDqCyyuIvwxET4f5ix
+pYhEK+SOht7sA+JYdtBZiDLdmMKXmNX5tfGL5i4oGHECgYEA30oKGXvZNmq4C/7R
+ndWPxuQ0NkX3pUiGjN66flNf57leHffd5iQclGD3QIR0YTpDaAZ6jF3qV10kTZ9p
++UuRA6Kd7GEyV2GYxMmmVzvA9QALDN/D8dCCJc+PO8976SFj8wllMVG41aTTtzqS
+tr59FKlUEgE1EZgEqS+9RDoayN0CgYEAwIOr9vYM/vITEDSImpxp46+Wr2+cUkJ2
+yffAwTHE1D8fRn8AbxMg5AriOw/LkOKD8zGBlORpf5D76dEE5NiEZGEq6o69WEM2
+hmYa7mcCnhEB3QIYcUIuvuRo8Fh12YG/SXd3kA6WInt2taEanGhFhsIfdKwW2/eH
+dtuD+pz1l60CgYBVmWitxFDND7RmxNVPEKQBt1JczA3YCympr2kHLKdDikiObItg
+ws9CArpGvYBwFYnpwzIPxaEkxMw6Bbb2nwWEwz6Pc+N8pCmQp01he8LJKa8SzGWt
+uiVqoVtjrnLuOKK8dQmaEp4tKPcQ8x9zdys0VIWqMVRK3mBLC8Ye9bd28QKBgE1w
+Kaori0q6IUTxfMmEhWua4+gp4x2LsrDHQff0hxJBWdlHmOsVLLPOVPYhAmeDVkRq
+847q2i0AKvUAqRFMruSZ2WOEi1GHp9UkGU0wjnL6sF8wSpi0YI1U34lea/lUIZfx
+wFxoIag5NaUV6thjcSQlzfVmi8NrrEf5QPt8S6X5AoGANaCh2qtSZisawT6sjNpI
+CwD1UhR45iYUaTx5VRDa79PK7SY7Hw5nCfzdo77I7Plec/OgptbAxJOmVtx5Jgc1
+TcvX3eaJHD3Wz1nKoRfQnR4e3sp8/SJs9QiNieSq8jwyKmvhILl/UmfHrg0PdItZ
+QX6Y2VLLEayhllPo5hiUWLo=
-----END PRIVATE KEY-----
diff --git a/jstests/libs/trusted-server.pem.digest.sha1 b/jstests/libs/trusted-server.pem.digest.sha1
index 4a0f953a8da10..aa8559b350ed1 100644
--- a/jstests/libs/trusted-server.pem.digest.sha1
+++ b/jstests/libs/trusted-server.pem.digest.sha1
@@ -1 +1 @@
-50922DBE6EFF8BDFB3EA8054EEF9B2A090B9E83A
\ No newline at end of file
+4B9818A479578E91263E92744C3E77E73311C3CF
\ No newline at end of file
diff --git a/jstests/libs/trusted-server.pem.digest.sha256 b/jstests/libs/trusted-server.pem.digest.sha256
index fb2bfdf3fcf51..aede4d3a54586 100644
--- a/jstests/libs/trusted-server.pem.digest.sha256
+++ b/jstests/libs/trusted-server.pem.digest.sha256
@@ -1 +1 @@
-3928416446FFAFFC95B07E9FE82F223F2CDD01689DBE513172B7A2D27904B616
\ No newline at end of file
+2C0022083F31AC3DA66694713E26A7D2C1F012D880E85244D4B6ACC975652CA1
\ No newline at end of file
diff --git a/jstests/libs/trusted-server.pfx b/jstests/libs/trusted-server.pfx
index 57b9c3080196a..57cc9f6c0db7c 100644
Binary files a/jstests/libs/trusted-server.pfx and b/jstests/libs/trusted-server.pfx differ
diff --git a/jstests/libs/ttl_util.js b/jstests/libs/ttl_util.js
index f17b89e404533..723170cae2215 100644
--- a/jstests/libs/ttl_util.js
+++ b/jstests/libs/ttl_util.js
@@ -4,7 +4,7 @@
load("jstests/libs/fixture_helpers.js");
-const TTLUtil = class {
+export const TTLUtil = class {
/**
* Wait until documents inserted before a call to this function have been visited by a TTL
* monitor pass. On replica sets, by default the function waits for the TTL deletes to become
diff --git a/jstests/libs/txns/txn_passthrough_runner_selftest.js b/jstests/libs/txns/txn_passthrough_runner_selftest.js
index 70bc723dc953c..9c69e9c86a1b2 100644
--- a/jstests/libs/txns/txn_passthrough_runner_selftest.js
+++ b/jstests/libs/txns/txn_passthrough_runner_selftest.js
@@ -15,20 +15,12 @@ db.setProfilingLevel(2);
const coll = db[testName];
assert.commandWorked(coll.insert({x: 1}));
-/* TODO(SERVER-47835) undenylist
-let commands = db.system.profile.find().toArray();
-// Check that the insert is not visible because the txn has not committed.
-assert.eq(commands.length, 0);
-*/
+
// Use a dummy, unrelated operation to signal the txn runner to commit the transaction.
assert.commandWorked(db.runCommand({ping: 1}));
let commands = db.system.profile.find().toArray();
// Assert the insert is now visible.
assert.eq(commands.length, 1);
-/* TODO(SERVER-47835) replace above assertion with below assertion.
-assert.eq(commands.length, 2);*/
-/* TODO(SERVER-47835) uncomment
-assert.eq(commands[1].command.find, 'system.profile');*/
assert.eq(commands[0].command.insert, testName);
})();
diff --git a/jstests/libs/uuid_util.js b/jstests/libs/uuid_util.js
index 796d294942843..a9df57db5666a 100644
--- a/jstests/libs/uuid_util.js
+++ b/jstests/libs/uuid_util.js
@@ -32,4 +32,4 @@ function getUUIDFromListCollections(db, collName) {
function extractUUIDFromObject(uuid) {
const uuidString = uuid.toString();
return uuidString.substring(6, uuidString.length - 2);
-}
\ No newline at end of file
+}
diff --git a/jstests/libs/wildcard_index_helpers.js b/jstests/libs/wildcard_index_helpers.js
index 0977a0ad7be40..df712192fe978 100644
--- a/jstests/libs/wildcard_index_helpers.js
+++ b/jstests/libs/wildcard_index_helpers.js
@@ -1,12 +1,9 @@
/**
* Common utility functions for testing functionality of Wildcard Indexes.
*/
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
-"use strict";
-
-const WildcardIndexHelpers = (function() {
- load("jstests/libs/analyze_plan.js");
-
+export const WildcardIndexHelpers = (function() {
/**
* Asserts that the given explain contains the given expectedIndexName in the winningPlan.
*/
diff --git a/jstests/libs/write_concern_util.js b/jstests/libs/write_concern_util.js
index fff2517c75498..f02f7a65d8b14 100644
--- a/jstests/libs/write_concern_util.js
+++ b/jstests/libs/write_concern_util.js
@@ -154,4 +154,4 @@ function runWriteConcernRetryabilityTest(priConn, secConn, cmd, kNodes, dbName,
checkWriteConcernTimedOut(testDB2.runCommand(cmd));
restartServerReplication(secConn);
-}
\ No newline at end of file
+}
diff --git a/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js b/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js
deleted file mode 100644
index 6eac1f493556d..0000000000000
--- a/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Tests that multi-deletes in a mixed version cluster replicate as individual delete operations.
- *
- * Batched multi-deletes were introduced in 6.1 so a replica set running in 6.0 FCV will not be
- * able to take advantage of this feature.
- *
- * @tags: [
- * requires_replication,
- * ]
- */
-(function() {
-'use strict';
-
-function runTest(primaryBinVersion, secondaryBinVersion) {
- const testLogPrefix =
- 'primary-' + primaryBinVersion + '-secondary-' + secondaryBinVersion + ': ';
- jsTestLog(testLogPrefix + 'Starting test case.');
- const rst = new ReplSetTest({
- nodes: [
- {
- binVersion: primaryBinVersion,
- },
- {
- binVersion: secondaryBinVersion,
- rsConfig: {votes: 0, priority: 0},
- },
- ]
- });
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
- const db = primary.getDB('test');
- const coll = db.t;
-
- const docIds = [0, 1, 2, 3];
- assert.commandWorked(coll.insert(docIds.map((x) => {
- return {_id: x, x: x};
- })));
-
- assert.commandWorked(coll.remove({}));
- // Check oplog entries generated for the multi-delete operation.
- // Oplog entries will be returned in reverse timestamp order (most recent first).
- const ops = rst.findOplog(primary, {op: 'd', ns: coll.getFullName()}).toArray();
- jsTestLog(testLogPrefix + 'applyOps oplog entries: ' + tojson(ops));
- assert.eq(ops.length,
- docIds.length,
- 'number oplog entries should match documents inserted initially');
- const deletedDocIds = ops.map((entry) => entry.o._id).flat();
- jsTestLog(testLogPrefix + 'deleted doc _ids: ' + tojson(deletedDocIds));
- assert.sameMembers(deletedDocIds, docIds);
-
- rst.stopSet();
- jsTestLog(testLogPrefix + 'Test case finished successfully.');
-}
-
-runTest('latest', 'last-lts');
-runTest('last-lts', 'latest');
-})();
diff --git a/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js b/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js
index cbcc526127417..d0b2236980597 100644
--- a/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js
+++ b/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js
@@ -3,7 +3,7 @@
// admin.system.keys on upgrade.
//
-load('./jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/multi_rs.js');
var oldVersion = "last-lts";
@@ -53,4 +53,4 @@ assert.eq(1, rsConn.getDB("admin").auth("root", "root"));
assert.commandWorked(rsConn.adminCommand({hello: 1}));
print("clusterTime2: " + tojson(rsConn.getDB("admin").getSession().getClusterTime()));
-rst.stopSet();
\ No newline at end of file
+rst.stopSet();
diff --git a/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js
index 9bde5a6798725..9c85cf7390a33 100644
--- a/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js
@@ -7,7 +7,7 @@
// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-load("./jstests/multiVersion/libs/verify_versions.js");
+load("jstests/multiVersion/libs/verify_versions.js");
(function() {
"use strict";
diff --git a/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js b/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js
index a0dffdf776e86..7389b496f8e36 100644
--- a/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js
+++ b/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js
@@ -2,8 +2,8 @@
// Tests upgrading a replica set
//
-load('./jstests/multiVersion/libs/multi_rs.js');
-load('./jstests/libs/test_background_ops.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/libs/test_background_ops.js');
var oldVersion = "last-lts";
diff --git a/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js b/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js
index 546ae94e95b7f..751b3a66dde53 100644
--- a/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js
+++ b/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js
@@ -4,7 +4,7 @@
(function() {
"use strict";
-load('./jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/multi_rs.js');
const lastLTSVersion = "last-lts";
const latestVersion = "latest";
diff --git a/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js b/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js
index a01eef7abd31d..a477ba3fe1de5 100644
--- a/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js
+++ b/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js
@@ -47,4 +47,4 @@ try {
// The fast count checks occur when tearing down the fixture as part of the consistency checks.
rollbackTest.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js b/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js
index 103d31a423913..221e353cb666c 100644
--- a/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js
+++ b/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js
@@ -31,7 +31,6 @@ const defaultOptions = {
// This lists all binary versions older than the last-lts version.
const versions = [
- {binVersion: '4.2', testCollection: 'four_two'},
{binVersion: '5.0', testCollection: 'five_zero'},
];
diff --git a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
index 3b9bbf90cabb6..492fb5d66af00 100644
--- a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
+++ b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
@@ -11,7 +11,11 @@ load('jstests/multiVersion/libs/multi_rs.js');
const timeFieldName = "time";
// Note that this list will need to be kept up to date as versions are added/dropped.
-const upgradeVersions = [{binVersion: "6.0", fcv: "6.0"}, {binVersion: "latest"}];
+const upgradeVersions = [
+ {binVersion: "6.0", fcv: "6.0"},
+ {binVersion: "last-lts", fcv: lastLTSFCV},
+ {binVersion: "latest"}
+];
/*
* Creates a collection, populates it with `docs`, runs the `query` and ensures that the result set
diff --git a/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js b/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js
index 69c110df30faf..5dfb74acd2113 100644
--- a/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js
@@ -3,7 +3,7 @@
//
//
-load('./jstests/multiVersion/libs/verify_versions.js');
+load('jstests/multiVersion/libs/verify_versions.js');
(function() {
"use strict";
diff --git a/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js b/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js
index 00e025e910db2..1e0b1561a7938 100644
--- a/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js
@@ -3,7 +3,7 @@
//
//
-load('./jstests/multiVersion/libs/verify_versions.js');
+load('jstests/multiVersion/libs/verify_versions.js');
(function() {
"use strict";
diff --git a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
index 976a2f01c52d3..56f5a7399da6c 100644
--- a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js
@@ -2,8 +2,8 @@
// Tests upgrading then downgrading a replica set
//
-load('./jstests/multiVersion/libs/multi_rs.js');
-load('./jstests/libs/test_background_ops.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/libs/test_background_ops.js');
for (let oldVersion of ["last-lts", "last-continuous"]) {
jsTest.log("Testing upgrade/downgrade with " + oldVersion);
diff --git a/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js
index 603caeac82632..91ceb52c68737 100644
--- a/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js
@@ -7,7 +7,7 @@
(function() {
"use strict";
-function runTest(downgradeFCV) {
+function runTest(downgradeFCV, succeedDowngrade) {
const rst = new ReplSetTest({nodes: [{binVersion: "latest"}]});
rst.startSet();
rst.initiate();
@@ -34,9 +34,20 @@ function runTest(downgradeFCV) {
assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}),
ErrorCodes.MaxTimeMSExpired);
- jsTestLog("Downgrade the featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
- checkFCV(adminDB, downgradeFCV);
+ if (succeedDowngrade) {
+ jsTestLog("Downgrade the featureCompatibilityVersion.");
+ assert.commandWorked(
+ testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
+ checkFCV(adminDB, downgradeFCV);
+ } else {
+ jsTestLog(
+ "Downgrade the featureCompatibilityVersion but fail after transitioning to the intermediary downgrading state.");
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'failDowngrading', mode: "alwaysOn"}));
+ assert.commandFailedWithCode(
+ testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}), 549181);
+ checkFCV(adminDB, downgradeFCV, downgradeFCV);
+ }
jsTestLog("Drop the collection. This should succeed, since the transaction was aborted.");
assert.commandWorked(testDB.runCommand({drop: collName}));
@@ -45,17 +56,22 @@ function runTest(downgradeFCV) {
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
} finally {
- jsTestLog("Restore the original featureCompatibilityVersion.");
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
+ // We can't upgrade from "downgrading to lastContinuous" -> latest.
+ if (succeedDowngrade || downgradeFCV == lastLTSFCV) {
+ jsTestLog("Restore the original featureCompatibilityVersion.");
+ assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+ checkFCV(adminDB, latestFCV);
+ }
}
session.endSession();
rst.stopSet();
}
-runTest(lastLTSFCV);
+runTest(lastLTSFCV, true /* succeedDowngrade */);
+runTest(lastLTSFCV, false /* succeedDowngrade */);
if (lastLTSFCV !== lastContinuousFCV) {
- runTest(lastContinuousFCV);
+ runTest(lastContinuousFCV, true /* succeedDowngrade */);
+ runTest(lastContinuousFCV, false /* succeedDowngrade */);
}
}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js b/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
deleted file mode 100644
index eabdfe3e40ee1..0000000000000
--- a/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js
+++ /dev/null
@@ -1,34 +0,0 @@
-//
-// Test checks whether the balancer correctly detects a mixed set of shards
-//
-
-// Test mixed version between "latest" and "last-lts"/"last-continuous".
-for (let versions of [["latest", "last-lts"], ["latest", "last-continuous"]]) {
- jsTest.log("Starting cluster with shard binVersion: " + tojson(versions));
-
- var options = {
- mongosOptions: {verbose: 1, useLogFiles: true},
- configOptions: {},
- shardOptions: {binVersion: versions},
- enableBalancer: true
- };
-
- var st = new ShardingTest({shards: 3, mongos: 1, other: options});
-
- var mongos = st.s0;
- var admin = mongos.getDB("admin");
- var coll = mongos.getCollection("foo.bar");
-
- printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
- st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
- printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-
- assert.soon(function() {
- var log = cat(mongos.fullOptions.logFile);
- return /multiVersion cluster detected/.test(log);
- }, "multiVersion warning not printed!", 30 * 16 * 60 * 1000, 5 * 1000);
-
- st.stop();
-
- jsTest.log("DONE!");
-}
diff --git a/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js
index 66eaca15793eb..af1c209ae5614 100644
--- a/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js
@@ -5,11 +5,7 @@
* @tags: [requires_fcv_70]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/feature_flag_util.js");
const latest = "latest";
@@ -188,5 +184,4 @@ function runShardingTest() {
runStandaloneTest();
runReplicaSetTest();
testConfigServerFCVTimestampIsAlwaysNewer();
-runShardingTest();
-})();
+runShardingTest();
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js b/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js
index 37a931971aeaf..87dbea4ebca69 100644
--- a/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js
+++ b/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js
@@ -66,9 +66,11 @@ function runShardingTest() {
st.stop();
}
-runStandaloneTest();
-runReplicaSetTest();
-runShardingTest();
+if (lastContinuousFCV != lastLTSFCV) {
+ runStandaloneTest();
+ runReplicaSetTest();
+ runShardingTest();
+}
TestData.setParameters.disableTransitionFromLatestToLastContinuous = false;
})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
index f72d4e764e84b..9d33dd7ae99a9 100644
--- a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js
@@ -13,7 +13,7 @@ const testName = "collection_validator_feature_compatibility_version";
const dbpath = MongoRunner.dataPath + testName;
// An array of feature flags that must be enabled to run feature flag tests.
-const featureFlagsToEnable = ["featureFlagUserRoles"];
+const featureFlagsToEnable = [];
// These arrays should be populated with
//
@@ -32,25 +32,12 @@ const testCasesLastContinuous = [
//
];
const testCasesLastContinuousWithFeatureFlags = [
- // TODO SERVER-70689: Remove this case when 7.0 becomes lastLTS.
- {
- validator: {$expr: {$eq: ["$$USER_ROLES", []]}},
- nonMatchingDocument: {a: 1},
- lastStableErrCode: 17276
- }
+
];
const testCasesLastStable = testCasesLastContinuous.concat([]);
const testCasesLastStableWithFeatureFlags = testCasesLastContinuousWithFeatureFlags.concat([]);
-// The addition of the $$USER_ROLES system variable is slightly different than the usual use case of
-// this test file. This means that some of the following commands won't work/fail as expected for
-// the $$USER_ROLES test case.
-// TODO SERVER-70689: Remove this function and references to it.
-function testCaseDoesNotReferenceUserRoles(testCase) {
- return testCase.validator.$expr.$eq[0] != "$$USER_ROLES";
-}
-
// Tests Feature Compatibility Version behavior of the validator of a collection by executing test
// cases 'testCases' and using a previous stable version 'lastVersion' of mongod. 'lastVersion' can
// have values "last-lts" and "last-continuous".
@@ -244,29 +231,21 @@ function testCollectionValidatorFCVBehavior(lastVersion, testCases, featureFlags
testDB = conn.getDB(testName);
testCases.forEach(function(test, i) {
- // In this case, using $$USER_ROLES on the last FCV will cause the collection
- // creation to fail during parsing because the necessary feature flag will not have been
- // enabled.
- // TODO SERVER-70689: Remove the guard of this if-statement and keep the body.
- if (testCaseDoesNotReferenceUserRoles(test)) {
- const coll = testDB["coll3" + i];
- // Even though the feature compatibility version is the last version, we should still
- // be able to add a validator using new query features, because
- // internalValidateFeaturesAsPrimary is false.
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {validator: test.validator}),
- `Expected to be able to create collection with validator ${
- tojson(test.validator)}`);
-
- // We should also be able to modify a collection to have a validator using new query
- // features.
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- assert.commandWorked(
- testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
- `Expected to be able to modify collection validator to be ${
- tojson(test.validator)}`);
- }
+ const coll = testDB["coll3" + i];
+ // Even though the feature compatibility version is the last version, we should still
+ // be able to add a validator using new query features, because
+ // internalValidateFeaturesAsPrimary is false.
+ assert.commandWorked(
+ testDB.createCollection(coll.getName(), {validator: test.validator}),
+ `Expected to be able to create collection with validator ${tojson(test.validator)}`);
+
+ // We should also be able to modify a collection to have a validator using new query
+ // features.
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(coll.getName()));
+ assert.commandWorked(
+ testDB.runCommand({collMod: coll.getName(), validator: test.validator}),
+ `Expected to be able to modify collection validator to be ${tojson(test.validator)}`);
});
MongoRunner.stopMongod(conn);
diff --git a/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js b/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js
new file mode 100644
index 0000000000000..e4b552b4dcffe
--- /dev/null
+++ b/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js
@@ -0,0 +1,244 @@
+/**
+ * Tests the defaultStartupFCV startup parameter.
+ */
+
+(function() {
+"use strict";
+
+TestData.setParameters = TestData.setParameters || {};
+TestData.setParameters.disableTransitionFromLatestToLastContinuous = true;
+
+const latest = "latest";
+const testName = "default_startup_FCV_parameter";
+const dbpath = MongoRunner.dataPath + testName;
+resetDbpath(dbpath);
+
+function runStandaloneTest() {
+ jsTestLog("Test starting with defaultStartupFCV = lastLTS");
+ let conn = MongoRunner.runMongod(
+ {binVersion: latest, setParameter: "defaultStartupFCV=" + lastLTSFCV});
+ assert.neq(null, conn);
+ let adminDB = conn.getDB("admin");
+ checkFCV(adminDB, lastLTSFCV);
+ MongoRunner.stopMongod(conn);
+
+ jsTestLog("Test starting with defaultStartupFCV = lastContinuous");
+ conn = MongoRunner.runMongod({
+ binVersion: latest,
+ dbpath: dbpath,
+ setParameter: "defaultStartupFCV=" + lastContinuousFCV
+ });
+ assert.neq(null, conn);
+ adminDB = conn.getDB("admin");
+ checkFCV(adminDB, lastContinuousFCV);
+ MongoRunner.stopMongod(conn);
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with defaultStartupFCV when there is already an existing FCV.");
+ conn = MongoRunner.runMongod({
+ binVersion: latest,
+ dbpath: dbpath,
+ noCleanData: true,
+ setParameter: "defaultStartupFCV=" + lastLTSFCV
+ });
+ assert.neq(null, conn);
+ adminDB = conn.getDB("admin");
+ // The FCV should still be the original FCV, not the provided defaultStartupFCV.
+ checkFCV(adminDB, lastContinuousFCV);
+ assert(rawMongoProgramOutput().includes(
+ "Ignoring the provided defaultStartupFCV parameter since the FCV already exists"));
+ MongoRunner.stopMongod(conn);
+
+ jsTestLog("Test starting with defaultStartupFCV = latest");
+ conn =
+ MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=" + latestFCV});
+ assert.neq(null, conn);
+ adminDB = conn.getDB("admin");
+ checkFCV(adminDB, latestFCV);
+ MongoRunner.stopMongod(conn);
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest");
+ conn = MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=hello"});
+ assert.neq(null, conn);
+ adminDB = conn.getDB("admin");
+ checkFCV(adminDB, latestFCV);
+ assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV"));
+ MongoRunner.stopMongod(conn);
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest");
+ conn = MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=5.0"});
+ assert.neq(null, conn);
+ adminDB = conn.getDB("admin");
+ checkFCV(adminDB, latestFCV);
+ assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV"));
+ MongoRunner.stopMongod(conn);
+}
+
+function runReplicaSetTest() {
+ jsTestLog("Test starting with defaultStartupFCV = lastLTS");
+ let rst = new ReplSetTest({
+ nodes: [
+ {
+ binVersion: latest,
+ setParameter: {defaultStartupFCV: lastLTSFCV},
+
+ },
+ {
+ binVersion: latest,
+ // The second node will initial sync from the primary and end up with lastLTSFCV.
+ setParameter: {defaultStartupFCV: lastContinuousFCV},
+ rsConfig: {priority: 0},
+ }
+ ]
+ });
+ rst.startSet();
+ rst.initiate();
+ assert.neq(null, rst);
+ let primaryAdminDB = rst.getPrimary().getDB("admin");
+ let secondaryAdminDB = rst.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, lastLTSFCV);
+ checkFCV(secondaryAdminDB, lastLTSFCV);
+ rst.stopSet();
+
+ jsTestLog("Test starting with defaultStartupFCV = lastContinuous");
+ rst = new ReplSetTest({
+ nodes: [
+ {
+ binVersion: latest,
+ dbpath: dbpath + "1",
+ setParameter: {defaultStartupFCV: lastContinuousFCV},
+
+ },
+ {
+ binVersion: latest,
+ dbpath: dbpath + "2",
+ // The second node will initial sync from the primary and end up with
+ // lastContinuousFCV.
+ setParameter: {defaultStartupFCV: lastLTSFCV},
+ rsConfig: {priority: 0},
+ }
+ ]
+ });
+ rst.startSet();
+ rst.initiate();
+ assert.neq(null, rst);
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaryAdminDB = rst.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, lastContinuousFCV);
+ checkFCV(secondaryAdminDB, lastContinuousFCV);
+ rst.stopSet(null /* signal */, true /* forRestart */);
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with defaultStartupFCV when there is already an existing FCV.");
+ rst.startSet({restart: true, setParameter: {defaultStartupFCV: lastLTSFCV}});
+ assert.neq(null, rst);
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaryAdminDB = rst.getSecondary().getDB("admin");
+ // The FCV should still be the original FCV, not the provided defaultStartupFCV.
+ checkFCV(primaryAdminDB, lastContinuousFCV);
+ checkFCV(secondaryAdminDB, lastContinuousFCV);
+ rst.stopSet();
+
+ jsTestLog("Test starting with defaultStartupFCV = latest");
+ rst = new ReplSetTest({
+ nodes: 2,
+ nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}}
+ });
+ rst.startSet();
+ rst.initiate();
+ assert.neq(null, rst);
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaryAdminDB = rst.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, latestFCV);
+ checkFCV(secondaryAdminDB, latestFCV);
+ rst.stopSet();
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest");
+ rst = new ReplSetTest(
+ {nodes: 2, nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: "hello"}}});
+ rst.startSet();
+ rst.initiate();
+ assert.neq(null, rst);
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaryAdminDB = rst.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, latestFCV);
+ checkFCV(secondaryAdminDB, latestFCV);
+ assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV"));
+ rst.stopSet();
+
+ clearRawMongoProgramOutput();
+ jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest");
+ rst = new ReplSetTest(
+ {nodes: 2, nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: "5.0"}}});
+ rst.startSet();
+ rst.initiate();
+ assert.neq(null, rst);
+ primaryAdminDB = rst.getPrimary().getDB("admin");
+ secondaryAdminDB = rst.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, latestFCV);
+ checkFCV(secondaryAdminDB, latestFCV);
+ assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV"));
+ rst.stopSet();
+}
+
+function runShardingTest() {
+ jsTestLog("Test starting sharded cluster with defaultStartupFCV = lastLTS");
+ let st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ config: 1,
+ // Shards should ignore the defaultStartupFCV parameter.
+ shardOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}},
+ configOptions: {binVersion: latest, setParameter: {defaultStartupFCV: lastLTSFCV}}
+ });
+ let configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+ let shard0PrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
+ let shard1PrimaryAdminDB = st.rs1.getPrimary().getDB("admin");
+
+ checkFCV(configPrimaryAdminDB, lastLTSFCV);
+ checkFCV(shard0PrimaryAdminDB, lastLTSFCV);
+ checkFCV(shard1PrimaryAdminDB, lastLTSFCV);
+ st.stop();
+
+ st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ config: 1,
+ configOptions: {binVersion: latest, setParameter: {defaultStartupFCV: lastContinuousFCV}}
+ });
+ configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin");
+ shard0PrimaryAdminDB = st.rs0.getPrimary().getDB("admin");
+ shard1PrimaryAdminDB = st.rs1.getPrimary().getDB("admin");
+
+ checkFCV(configPrimaryAdminDB, lastContinuousFCV);
+ checkFCV(shard0PrimaryAdminDB, lastContinuousFCV);
+ checkFCV(shard1PrimaryAdminDB, lastContinuousFCV);
+
+ jsTestLog("Test that a replica set started with shardsvr still defaults to lastLTS");
+ const newShard = new ReplSetTest({
+ nodes: 2,
+ nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}}
+ });
+ newShard.startSet({shardsvr: ''});
+ newShard.initiate();
+
+ let primaryAdminDB = newShard.getPrimary().getDB("admin");
+ let secondaryAdminDB = newShard.getSecondary().getDB("admin");
+ checkFCV(primaryAdminDB, lastLTSFCV);
+ checkFCV(secondaryAdminDB, lastLTSFCV);
+ assert.commandWorked(st.s.adminCommand({addShard: newShard.getURL(), name: newShard.name}));
+
+ jsTestLog("Test that the FCV should be set to the cluster's FCV after running addShard");
+ checkFCV(primaryAdminDB, lastContinuousFCV);
+ checkFCV(secondaryAdminDB, lastContinuousFCV);
+ newShard.stopSet();
+ st.stop();
+}
+
+runStandaloneTest();
+runReplicaSetTest();
+runShardingTest();
+})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
index e5c79d024bdce..b701d8a0859de 100644
--- a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
+++ b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js
@@ -1,7 +1,7 @@
// Test the downgrade of a replica set succeeds, while reads and writes continue.
-load('./jstests/multiVersion/libs/multi_rs.js');
-load('./jstests/libs/test_background_ops.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/libs/test_background_ops.js');
let newVersion = "latest";
diff --git a/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js b/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js
index 8c598bad5a1d7..4e79e2e31aa6a 100644
--- a/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js
+++ b/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js
@@ -4,11 +4,7 @@
* @tags: [requires_fcv_70]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/feature_flag_util.js");
const latest = "latest";
const testName = "fcv_upgrade_fails_during_is_cleaning_server_metadata";
@@ -147,5 +143,4 @@ function runShardedClusterTest() {
runStandaloneTest();
runReplicaSetTest();
-runShardedClusterTest();
-})();
+runShardedClusterTest();
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js b/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js
index 12959cf2d111f..eb691cdf53035 100644
--- a/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js
+++ b/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js
@@ -5,7 +5,7 @@
'use strict';
-load("./jstests/multiVersion/libs/initial_sync.js");
+load("jstests/multiVersion/libs/initial_sync.js");
let replSetVersion = "latest";
diff --git a/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js b/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js
index d3f33a98f13b7..0e9dcb9154132 100644
--- a/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js
+++ b/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js
@@ -5,7 +5,7 @@
'use strict';
-load("./jstests/multiVersion/libs/initial_sync.js");
+load("jstests/multiVersion/libs/initial_sync.js");
let newSecondaryVersion = "latest";
diff --git a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
index 85949d3d06476..fb6a5f25aa7f7 100644
--- a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
+++ b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js
@@ -31,12 +31,15 @@ const defaultOptions = {
// This lists all supported releases and needs to be kept up to date as versions are added and
// dropped.
const versions = [
- {binVersion: '4.2', featureCompatibilityVersion: '4.2', testCollection: 'four_two'},
{binVersion: '4.4', featureCompatibilityVersion: '4.4', testCollection: 'four_four'},
{binVersion: '5.0', featureCompatibilityVersion: '5.0', testCollection: 'five_zero'},
{binVersion: '6.0', featureCompatibilityVersion: '6.0', testCollection: 'six_zero'},
- {binVersion: 'last-lts', testCollection: 'last_lts'},
- {binVersion: 'last-continuous', testCollection: 'last_continuous'},
+ {binVersion: 'last-lts', featureCompatibilityVersion: lastLTSFCV, testCollection: 'last_lts'},
+ {
+ binVersion: 'last-continuous',
+ featureCompatibilityVersion: lastContinuousFCV,
+ testCollection: 'last_continuous'
+ },
{binVersion: 'latest', featureCompatibilityVersion: latestFCV, testCollection: 'latest'},
];
diff --git a/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js b/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js
index dde2219aeba76..5aa55f5576f95 100644
--- a/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js
+++ b/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js
@@ -3,11 +3,6 @@
* FCV is still in downgrading state and we can change FCV to upgraded state.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-
const latest = "latest";
const testName = "restart_during_downgrading_fcv";
const dbpath = MongoRunner.dataPath + testName;
@@ -91,5 +86,4 @@ const runShardedClusterTest = function() {
runStandaloneTest();
runReplicaSetTest();
-runShardedClusterTest();
-})();
+runShardedClusterTest();
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js b/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js
index 1978172b68fee..3b18543ea7af7 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js
@@ -14,4 +14,4 @@ testMultiversionRollback(testName, "last-lts", "latest");
testName = "multiversion_rollback_last_continuous_to_latest";
jsTestLog("Testing multiversion rollback from last-continuous to latest");
testMultiversionRollback(testName, "last-continuous", "latest");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js
index d8328df339679..84b818421e35d 100644
--- a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js
+++ b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js
@@ -14,4 +14,4 @@ testMultiversionRollback(testName, "latest", "last-lts");
var testName = "multiversion_rollback_latest_to_last_continuous";
jsTestLog("Testing multiversion rollback from latest to last-continuous");
testMultiversionRollback(testName, "latest", "last-continuous");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js b/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js
index af1552dbbf9c8..0233592830aa2 100644
--- a/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js
+++ b/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js
@@ -22,8 +22,11 @@ function runTest(downgradeFCV) {
let primary = rst.getPrimary();
let adminDB = primary.getDB("admin");
- assert.commandWorked(adminDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
- checkFCV(adminDB, downgradeFCV);
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'failDowngrading', mode: "alwaysOn"}));
+ assert.commandFailedWithCode(
+ adminDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}), 549181);
+ checkFCV(adminDB, downgradeFCV, downgradeFCV);
if (downgradeFCV === lastLTSFCV) {
numLastLTSRuns++;
}
@@ -73,4 +76,4 @@ if (lastLTSFCV === lastContinuousFCV) {
assert.eq(numLastLTSRuns, 2);
assert.eq(numLastContRuns, 1);
}
-})();
\ No newline at end of file
+})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
index 3e74869a014cf..e30a6fc042279 100644
--- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js
@@ -9,13 +9,9 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
// data.
TestData.skipCheckDBHashes = true;
-(function() {
-"use strict";
-
load("jstests/libs/index_catalog_helpers.js");
load("jstests/libs/write_concern_util.js");
load("jstests/replsets/rslib.js");
-load("jstests/libs/feature_flag_util.js");
let dbpath = MongoRunner.dataPath + "feature_compatibility_version";
resetDbpath(dbpath);
@@ -282,16 +278,6 @@ function runReplicaSetTest(downgradeVersion) {
assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
restartServerReplication(secondary);
- // If downgrading->upgrading feature is not enabled,
- // upgrading the FCV should fail if a previous downgrade has not yet completed.
- if (!FeatureFlagUtil.isEnabled(primaryAdminDB,
- "DowngradingToUpgrading",
- null /* user not specified */,
- true /* ignores FCV */)) {
- assert.commandFailedWithCode(
- primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}), 5147403);
- }
-
if (lastContinuousFCV !== lastLTSFCV) {
// We will fail if we have not yet completed a downgrade and attempt to downgrade to a
// different target version.
@@ -533,4 +519,3 @@ if (lastLTSFCV != lastContinuousFCV) {
runReplicaSetTest('last-continuous');
runShardingTest('last-continuous');
}
-})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js b/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js
index a15d2bbb5ef26..009415f8ebec8 100644
--- a/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js
+++ b/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js
@@ -1,16 +1,17 @@
/**
* Tests that setFeatureCompatibilityVersion command aborts an ongoing reshardCollection command
*/
-(function() {
-"use strict";
-
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/parallel_shell_helpers.js");
load("jstests/sharding/libs/resharding_test_fixture.js");
load('jstests/libs/discover_topology.js');
load('jstests/libs/fail_point_util.js');
load('jstests/sharding/libs/sharded_transactions_helpers.js');
-function runTest(forcePooledConnectionsDropped) {
+// A global variable is used to avoid spinning up a set of servers just to see if the
+// feature flag is enabled.
+let reshardingImprovementsEnabled;
+function runTest({forcePooledConnectionsDropped, withUUID}) {
const reshardingTest =
new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true});
reshardingTest.setup();
@@ -29,6 +30,16 @@ function runTest(forcePooledConnectionsDropped) {
let mongos = inputCollection.getMongo();
+ if (reshardingImprovementsEnabled === undefined) {
+ reshardingImprovementsEnabled = FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements");
+ }
+ if (withUUID && !reshardingImprovementsEnabled) {
+ jsTestLog("Skipping test with UUID since featureFlagReshardingImprovements is not enabled");
+ reshardingTest.tearDown();
+ }
+ jsTestLog("Testing with forcePooledConnectionsDropped: " + forcePooledConnectionsDropped +
+ " withUUID: " + withUUID);
+
for (let x = 0; x < 1000; x++) {
assert.commandWorked(inputCollection.insert({oldKey: x, newKey: -1 * x}));
}
@@ -48,12 +59,14 @@ function runTest(forcePooledConnectionsDropped) {
const coordinatorDoc =
mongos.getCollection("config.reshardingOperations").findOne({ns: sourceNamespace});
- return coordinatorDoc === null || coordinatorDoc.state === "aborting";
+ return coordinatorDoc === null || coordinatorDoc.state === "aborting" ||
+ coordinatorDoc.state === "quiesced";
});
}
const recipientShardNames = reshardingTest.recipientShardNames;
let awaitShell;
+ let reshardingUUID = withUUID ? UUID() : undefined;
reshardingTest.withReshardingInBackground(
{
newShardKeyPattern: {newKey: 1},
@@ -61,6 +74,7 @@ function runTest(forcePooledConnectionsDropped) {
{min: {newKey: MinKey}, max: {newKey: 0}, shard: recipientShardNames[0]},
{min: {newKey: 0}, max: {newKey: MaxKey}, shard: recipientShardNames[1]},
],
+ reshardingUUID: reshardingUUID
},
() => {
// Wait for config server to have started resharding before sending setFCV, otherwise
@@ -149,7 +163,11 @@ function runTest(forcePooledConnectionsDropped) {
// This test case forces the setFCV command to call dropsConnections while the coordinator is in
// the process of establishing connections to the participant shards in order to ensure that the
// resharding operation does not stall.
-runTest(true);
+runTest({forcePooledConnectionsDropped: true});
+
+assert(reshardingImprovementsEnabled !== undefined);
-runTest(false);
-})();
+// We test with a UUID because we need setFCV to abort the quiesce period as well, in order
+// to completely clear the config server's state collection. Because this test takes a while,
+// we don't try all combinations of forcePooledConnectionsDropped and withUUID.
+runTest({forcePooledConnectionsDropped: false, withUUID: reshardingImprovementsEnabled});
diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js
index 73326dd39ef96..0c9ebc56dfd4a 100644
--- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js
+++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js
@@ -8,6 +8,7 @@
*/
import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
+import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js";
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
load("jstests/libs/parallelTester.js"); // for 'Thread'
@@ -16,7 +17,7 @@ load("jstests/replsets/rslib.js"); // for 'setLogVerbosity'
const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()});
const tenantId = ObjectId().str;
-const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB");
+const dbName = makeTenantDB(tenantId, "testDB");
const collName = "testColl";
const donorRst = tenantMigrationTest.getDonorRst();
diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js
index c11f570e73511..4f0844c4133ee 100644
--- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js
+++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js
@@ -8,7 +8,7 @@
*/
import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
-import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js";
+import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js";
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
load("jstests/libs/parallelTester.js"); // for 'Thread'
@@ -17,7 +17,7 @@ function runTest(downgradeFCV) {
const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()});
const tenantId = ObjectId().str;
- const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB");
+ const dbName = makeTenantDB(tenantId, "testDB");
const collName = "testColl";
const donorPrimary = tenantMigrationTest.getDonorPrimary();
diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js
deleted file mode 100644
index 191c1e7a80201..0000000000000
--- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Tests that restarting a migration attempt after a failover fails if the donor and recipient no
- * longer share the same FCV.
- * @tags: [
- * requires_majority_read_concern,
- * incompatible_with_windows_tls,
- * # Shard merge is not robust to failovers and restarts.
- * incompatible_with_shard_merge,
- * serverless,
- * ]
- */
-
-import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
-load("jstests/libs/fail_point_util.js");
-load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
-load("jstests/libs/parallelTester.js"); // for 'Thread'
-
-function runTest(downgradeFCV) {
- const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()});
-
- const tenantId = ObjectId().str;
- const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB");
- const collName = "testColl";
-
- const donorPrimary = tenantMigrationTest.getDonorPrimary();
- const recipientPrimary = tenantMigrationTest.getRecipientPrimary();
-
- tenantMigrationTest.insertDonorDB(dbName, collName);
-
- const migrationId = UUID();
- const migrationOpts = {
- migrationIdString: extractUUIDFromObject(migrationId),
- recipientConnString: tenantMigrationTest.getRecipientConnString(),
- tenantId: tenantId,
- };
-
- // Configure a failpoint to have the recipient primary hang after a successful initial
- // comparison.
- const recipientDB = recipientPrimary.getDB(dbName);
- const hangAfterFirstFCVcheck =
- configureFailPoint(recipientDB, "fpAfterComparingRecipientAndDonorFCV", {action: "hang"});
-
- // Start a migration and wait for recipient to hang at the failpoint.
- assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts));
- hangAfterFirstFCVcheck.wait();
-
- // Downgrade the FCV for the donor set.
- assert.commandWorked(donorPrimary.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
-
- // Step up a new node in the recipient set and trigger a failover. The new primary should
- // attempt to resume cloning, but fail upon re-checking the FCVs.
- const recipientRst = tenantMigrationTest.getRecipientRst();
- const newRecipientPrimary = recipientRst.getSecondaries()[0];
- recipientRst.awaitLastOpCommitted();
- assert.commandWorked(newRecipientPrimary.adminCommand({replSetStepUp: 1}));
- hangAfterFirstFCVcheck.off();
- recipientRst.getPrimary();
-
- // Make sure we see the FCV mismatch detection message on the recipient regardless.
- checkLog.containsJson(newRecipientPrimary, 5382300);
-
- // Upgrade again to check on the status of the migration from the donor's point of view.
- assert.commandWorked(donorPrimary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- TenantMigrationTest.assertAborted(
- tenantMigrationTest.waitForMigrationToComplete(migrationOpts));
-
- tenantMigrationTest.stop();
-}
-
-runTest(lastContinuousFCV);
-if (lastContinuousFCV != lastLTSFCV) {
- runTest(lastLTSFCV);
-}
diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js
index 717cf2e52878c..0ca01e7706343 100644
--- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js
+++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js
@@ -8,6 +8,7 @@
*/
import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
+import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js";
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
load("jstests/libs/parallelTester.js"); // for 'Thread'
@@ -16,7 +17,7 @@ load("jstests/replsets/rslib.js"); // for 'setLogVerbosity'
const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()});
const tenantId = ObjectId().str;
-const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB");
+const dbName = makeTenantDB(tenantId, "testDB");
const collName = "testColl";
const donorRst = tenantMigrationTest.getDonorRst();
diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js
deleted file mode 100644
index 16735b65a9305..0000000000000
--- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Tests that we note down the recipient FCV at the beginning of a migration and that a change
- * in that FCV will abort the migration.
- * @tags: [
- * requires_majority_read_concern,
- * incompatible_with_windows_tls,
- * # Shard merge is not robust to failovers and restarts.
- * incompatible_with_shard_merge,
- * serverless,
- * ]
- */
-
-import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
-import {
- isShardMergeEnabled,
- makeX509OptionsForTest,
- runMigrationAsync
-} from "jstests/replsets/libs/tenant_migration_util.js";
-
-load("jstests/libs/fail_point_util.js");
-load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject'
-load("jstests/libs/parallelTester.js"); // for 'Thread'
-load("jstests/replsets/rslib.js"); // 'createRstArgs'
-
-function runTest(downgradeFCV) {
- const recipientRst = new ReplSetTest({
- nodes: 2,
- name: jsTestName() + "_recipient",
- nodeOptions: makeX509OptionsForTest().recipient
- });
-
- recipientRst.startSet();
- recipientRst.initiate();
-
- const tenantMigrationTest =
- new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst});
- const tenantId = ObjectId().str;
- const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB");
- const collName = "testColl";
-
- const recipientPrimary = tenantMigrationTest.getRecipientPrimary();
-
- tenantMigrationTest.insertDonorDB(dbName, collName);
-
- const migrationId = UUID();
- const migrationOpts = {
- migrationIdString: extractUUIDFromObject(migrationId),
- recipientConnString: tenantMigrationTest.getRecipientConnString(),
- tenantId: tenantId,
- };
-
- // Configure a failpoint to have the recipient primary hang after taking note of its FCV.
- const recipientDb = recipientPrimary.getDB(dbName);
- const hangAfterSavingFCV = configureFailPoint(
- recipientDb, "fpAfterRecordingRecipientPrimaryStartingFCV", {action: "hang"});
-
- // Start a migration and wait for recipient to hang at the failpoint.
- const donorRstArgs = createRstArgs(tenantMigrationTest.getDonorRst());
- const migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs);
- migrationThread.start();
- hangAfterSavingFCV.wait();
-
- const isRunningMergeProtocol = isShardMergeEnabled(recipientDb);
-
- // Downgrade the FCV for the recipient set.
- assert.commandWorked(
- recipientPrimary.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}));
-
- // Step up a new node in the recipient set and trigger a failover. The new primary should
- // attempt to resume cloning, but fail upon re-checking the FCV.
- const newRecipientPrimary = recipientRst.getSecondaries()[0];
- recipientRst.awaitLastOpCommitted();
- assert.commandWorked(newRecipientPrimary.adminCommand({replSetStepUp: 1}));
- hangAfterSavingFCV.off();
- recipientRst.getPrimary();
-
- // The migration will not be able to continue in the downgraded version.
- TenantMigrationTest.assertAborted(migrationThread.returnData());
- // Change-of-FCV detection message.
- if (isRunningMergeProtocol && MongoRunner.compareBinVersions(downgradeFCV, "5.2") < 0) {
- // FCV is too old for shard merge.
- checkLog.containsJson(newRecipientPrimary, 5949504);
- } else {
- // Can't change FCVs during a migration.
- checkLog.containsJson(newRecipientPrimary, 5356200);
- }
-
- tenantMigrationTest.stop();
- recipientRst.stopSet();
-}
-
-runTest(lastContinuousFCV);
-if (lastContinuousFCV != lastLTSFCV) {
- runTest(lastLTSFCV);
-}
diff --git a/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js b/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js
index ef03a3656f7bc..3c014f018d381 100644
--- a/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js
+++ b/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js
@@ -5,12 +5,8 @@
* @tags: [requires_fcv_70]
*/
-(function() {
-"use strict";
-
load('jstests/multiVersion/libs/verify_versions.js');
load('jstests/libs/fail_point_util.js');
-load("jstests/libs/feature_flag_util.js");
function runReplicaSet() {
let fcvDoc;
@@ -81,5 +77,4 @@ function runReplicaSet() {
rst.stopSet();
}
-runReplicaSet();
-})();
+runReplicaSet();
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js b/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js
index 71992e8004937..2c2e6bd76dcd7 100644
--- a/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js
+++ b/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js
@@ -5,12 +5,8 @@
* @tags: [requires_fcv_70]
*/
-(function() {
-"use strict";
-
load('jstests/multiVersion/libs/verify_versions.js');
load('jstests/libs/fail_point_util.js');
-load("jstests/libs/feature_flag_util.js");
function runSharding() {
let fcvDoc;
@@ -114,5 +110,4 @@ function runSharding() {
st.stop();
}
-runSharding();
-})();
+runSharding();
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
index fbb6a9a268782..eea18471f3994 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js
@@ -6,8 +6,9 @@
(function() {
"use strict";
-load('./jstests/multiVersion/libs/multi_rs.js');
-load('./jstests/multiVersion/libs/multi_cluster.js');
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/multi_cluster.js');
+load('jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js');
// When checking UUID consistency, the shell attempts to run a command on the node it believes is
// primary in each shard. However, this test restarts shards, and the node that is elected primary
@@ -16,21 +17,6 @@ load('./jstests/multiVersion/libs/multi_cluster.js');
// command is nondeterministic, skip the consistency check for this test.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
-var testCRUDAndAgg = function(db) {
- assert.commandWorked(db.foo.insert({x: 1}));
- assert.commandWorked(db.foo.insert({x: -1}));
- assert.commandWorked(db.foo.update({x: 1}, {$set: {y: 1}}));
- assert.commandWorked(db.foo.update({x: -1}, {$set: {y: 1}}));
- var doc1 = db.foo.findOne({x: 1});
- assert.eq(1, doc1.y);
- var doc2 = db.foo.findOne({x: -1});
- assert.eq(1, doc2.y);
-
- assert.commandWorked(db.foo.remove({x: 1}, true));
- assert.commandWorked(db.foo.remove({x: -1}, true));
- assert.eq(null, db.foo.findOne());
-};
-
// Test upgrade/downgrade between "latest" and "last-lts"/"last-continuous".
for (let oldVersion of ["last-lts", "last-continuous"]) {
var st = new ShardingTest({
@@ -63,6 +49,7 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// upgrade the config servers first
jsTest.log('upgrading config servers');
@@ -70,12 +57,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Then upgrade the shards.
jsTest.log('upgrading shard servers');
@@ -86,12 +75,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Finally, upgrade mongos
jsTest.log('upgrading mongos servers');
@@ -99,12 +90,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Check that version document is unmodified.
version = st.s.getCollection('config.version').findOne();
@@ -118,12 +111,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
jsTest.log('downgrading shard servers');
st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false});
@@ -133,24 +128,28 @@ for (let oldVersion of ["last-lts", "last-continuous"]) {
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
jsTest.log('downgrading config servers');
st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false});
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Restart mongos to clear all cache and force it to do remote calls.
st.restartMongoses();
testCRUDAndAgg(st.s.getDB('unsharded'));
testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
// Check that version document is unmodified.
version = st.s.getCollection('config.version').findOne();
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js
new file mode 100644
index 0000000000000..728f540a9ac7f
--- /dev/null
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js
@@ -0,0 +1,161 @@
+/**
+ * Tests that CRUD, aggregation, and DDL commands continue to work as expected with a config shard
+ * on both sharded and unsharded collections at each step of cluster upgrade/downgrade between
+ * last-lts and latest and between last-continuous and latest.
+ */
+(function() {
+"use strict";
+
+load('jstests/multiVersion/libs/multi_rs.js');
+load('jstests/multiVersion/libs/multi_cluster.js');
+load('jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js');
+
+// When checking UUID consistency, the shell attempts to run a command on the node it believes is
+// primary in each shard. However, this test restarts shards, and the node that is elected primary
+// after the restart may be different from the original primary. Since the shell does not retry on
+// NotWritablePrimary errors, and whether or not it detects the new primary before issuing the
+// command is nondeterministic, skip the consistency check for this test.
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
+// Test upgrade/downgrade between "latest" and "last-lts"/"last-continuous".
+for (let oldVersion of ["last-lts", "last-continuous"]) {
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ mongosOptions: {binVersion: oldVersion},
+ configOptions: {binVersion: oldVersion},
+ shardOptions: {binVersion: oldVersion},
+
+ rsOptions: {binVersion: oldVersion},
+ rs: true,
+ },
+ configShard: true
+ });
+ st.configRS.awaitReplication();
+
+ // check that config.version document gets initialized properly
+ var version = st.s.getCollection('config.version').findOne();
+ var clusterID = version.clusterId;
+ assert.neq(null, clusterID);
+
+ // Setup sharded collection
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'}));
+ st.ensurePrimaryShard('sharded', st.shard0.shardName);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}}));
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName}));
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // upgrade the config servers first
+ jsTest.log('upgrading config servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Then upgrade the shards.
+ jsTest.log('upgrading shard servers');
+ st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
+
+ awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
+ awaitRSClientHosts(st.s, st.rs1.getPrimary(), {ok: true, ismaster: true});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Finally, upgrade mongos
+ jsTest.log('upgrading mongos servers');
+ st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(clusterID, version.clusterId);
+
+ ///////////////////////////////////////////////////////////////////////////////////////////
+ // Downgrade back
+
+ jsTest.log('downgrading mongos servers');
+ st.downgradeCluster(oldVersion, {downgradeConfigs: false, downgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ jsTest.log('downgrading shard servers');
+ st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false});
+
+ awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
+ awaitRSClientHosts(st.s, st.rs1.getPrimary(), {ok: true, ismaster: true});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ jsTest.log('downgrading config servers');
+ st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false});
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Restart mongos to clear all cache and force it to do remote calls.
+ st.restartMongoses();
+
+ testCRUDAndAgg(st.s.getDB('unsharded'));
+ testCRUDAndAgg(st.s.getDB('sharded'));
+ testDDLOps(st);
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(clusterID, version.clusterId);
+
+ st.stop();
+}
+})();
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js
index ff24256908a7c..d847a980d4049 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js
@@ -57,4 +57,4 @@ function runTest(downgradeVersion) {
runTest('last-lts');
runTest('last-continuous');
-}());
\ No newline at end of file
+}());
diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js
index 1d17a15ff935e..19e6fab60b9e9 100644
--- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js
+++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js
@@ -10,18 +10,15 @@
* 6. Verify the data consistency after the downgrade procedure
*/
-(function() {
-'use strict';
-
load('jstests/multiVersion/libs/multi_cluster.js'); // For upgradeCluster
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const dbName = jsTestName();
function setupClusterAndDatabase(binVersion) {
const st = new ShardingTest({
mongos: 1,
- config: 1,
+ config: 2,
shards: 2,
other: {
mongosOptions: {binVersion: binVersion},
@@ -94,9 +91,42 @@ function checkConfigAndShardsFCV(expectedFCV) {
}
}
+// TODO(SERVER-77873): Remove checkReshardingActiveIndex; once the feature flag is removed the
+// check will be incorrect.
+function checkReshardingActiveIndex() {
+ const getActiveIndex = (node) => {
+ const indexes = node.getDB("config").reshardingOperations.getIndexes();
+ return indexes.find((index) => (index.name == "ReshardingCoordinatorActiveIndex"));
+ };
+ let activeIndex = getActiveIndex(st.configRS.getPrimary());
+ if (FeatureFlagUtil.isPresentAndEnabled(st.s, "ReshardingImprovements")) {
+ assert(
+ !activeIndex,
+ "With ReshardingImprovements enabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is present but should not be.");
+ }
+ // Since downgrading does not restore the index, we don't check for the index's presence
+ // until we force a step-up (which re-initializes the coordinator).
+
+ assert.commandWorked(st.configRS.getSecondary().adminCommand({replSetStepUp: 1}));
+ st.configRS.waitForPrimaryOnlyServices(st.configRS.getPrimary());
+ activeIndex = getActiveIndex(st.configRS.getPrimary());
+ if (FeatureFlagUtil.isPresentAndEnabled(st.s, "ReshardingImprovements")) {
+ assert(
+ !activeIndex,
+ "With ReshardingImprovements enabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is present but should not be, after step-up.");
+ } else {
+ assert(
+ activeIndex,
+ "With ReshardingImprovements disabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is not present but should be, after step-up.");
+ assert(activeIndex.unique,
+ "The config.reshardingOperations ReshardingCoordinatorActiveIndex is not unique");
+ }
+}
+
function checkClusterBeforeUpgrade(fcv) {
checkConfigAndShardsFCV(fcv);
checkConfigVersionDoc();
+ checkReshardingActiveIndex();
}
function checkClusterAfterBinaryUpgrade() {
@@ -106,10 +136,12 @@ function checkClusterAfterBinaryUpgrade() {
function checkClusterAfterFCVUpgrade(fcv) {
checkConfigAndShardsFCV(fcv);
checkConfigVersionDoc();
+ checkReshardingActiveIndex();
}
function checkClusterAfterFCVDowngrade() {
checkConfigVersionDoc();
+ checkReshardingActiveIndex();
}
function checkClusterAfterBinaryDowngrade(fcv) {
@@ -153,5 +185,4 @@ for (const oldVersion of [lastLTSFCV, lastContinuousFCV]) {
checkClusterAfterBinaryDowngrade(oldVersion);
st.stop();
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
index 2b7ed55bd953d..00e056952ab6a 100644
--- a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
+++ b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js
@@ -13,7 +13,7 @@ const testName = "view_definition_feature_compatibility_version_multiversion";
const dbpath = MongoRunner.dataPath + testName;
// An array of feature flags that must be enabled to run feature flag tests.
-const featureFlagsToEnable = ["featureFlagUserRoles"];
+const featureFlagsToEnable = [];
// These arrays should be populated with aggregation pipelines that use
// aggregation features in new versions of mongod. This test ensures that a view
@@ -21,10 +21,7 @@ const featureFlagsToEnable = ["featureFlagUserRoles"];
// latest version, and rejects it when the feature compatibility version is the last
// version.
const testCasesLastContinuous = [];
-const testCasesLastContinuousWithFeatureFlags = [
- // TODO SERVER-70689: Remove this case when 7.0 becomes lastLTS.
- [{$project: {z: "$$USER_ROLES"}}]
-];
+const testCasesLastContinuousWithFeatureFlags = [];
// Anything that's incompatible with the last continuous release is incompatible with the last
// stable release.
@@ -32,14 +29,6 @@ const testCasesLastStable = testCasesLastContinuous.concat([]);
const testCasesLastStableWithFeatureFlags = testCasesLastContinuousWithFeatureFlags.concat([]);
-// The addition of the $$USER_ROLES system variable is slightly different than the usual use case of
-// this test file. This means that some of the following commands won't work/fail as expected for
-// the $$USER_ROLES test case.
-// TODO SERVER-70689: Remove this function and references to it.
-function testCaseDoesNotReferenceUserRoles(testCase) {
- return testCase[0].$project.z != "$$USER_ROLES";
-}
-
// Tests Feature Compatibility Version behavior of view creation while using aggregation pipelines
// 'testCases' and using a previous stable version 'lastVersion' of mongod.
// 'lastVersion' can have values "last-lts" and "last-continuous".
@@ -94,27 +83,17 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = []
// Read against an existing view using new query features should not fail.
testCases.forEach((pipe, i) => {
- if (testCaseDoesNotReferenceUserRoles(pipe)) {
- // The $$USER_ROLES value will be evaluated every time the view is queried, so the
- // following query would fail since we are running an older FCV.
- // TODO SERVER-70689: Remove the guard of this if-statement and keep the body.
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`);
- }
+ assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`);
});
// Trying to create a new view in the same database as existing invalid view should fail,
// even if the new view doesn't use any new query features.
- if (testCaseDoesNotReferenceUserRoles(testCases[0])) {
- // Since the $$USER_ROLES variable won't be evaluated during this view creation, the view
- // creation will succeed even though we are on an older FCV.
- // TODO SERVER-70689: Remove the guard of this if-statement and keep the body.
- assert.commandFailedWithCode(
- testDB.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]),
- ErrorCodes.QueryFeatureNotAllowed,
- `Expected *not* to be able to create view on database ${testDB} while in FCV ${
- binVersionToFCV(lastVersion)}`);
- }
+ assert.commandFailedWithCode(
+ testDB.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]),
+ ErrorCodes.QueryFeatureNotAllowed,
+ `Expected *not* to be able to create view on database ${testDB} while in FCV ${
+ binVersionToFCV(lastVersion)}`);
// Trying to create a new view succeeds if it's on a separate database.
const testDB2 = conn.getDB(testName + '2');
@@ -171,13 +150,8 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = []
// Read against an existing view using new query features should not fail.
testCases.forEach((pipe, i) => {
- if (testCaseDoesNotReferenceUserRoles(pipe)) {
- // The view is evaluated on the fly, and the FCV is still set to the last version so the
- // evaluation of $$USER_ROLES will cause this to fail.
- // TODO SERVER-70689: Remove the guard of this if-statement and keep the body.
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
- `Failed to query view with pipeline ${tojson(pipe)}`);
- }
+ assert.commandWorked(testDB.runCommand({find: "firstView" + i}),
+ `Failed to query view with pipeline ${tojson(pipe)}`);
});
// Set the feature compatibility version back to the latest version.
@@ -217,32 +191,21 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = []
testDB = conn.getDB(testName);
testCases.forEach(function(pipe, i) {
- // In this case, using $$USER_ROLES on the last FCV version will cause the view
- // creation to fail during parsing because the necessary feature flag will not have been
- // enabled due to the older FCV.
- // TODO SERVER-70689: Remove the guard of this if-statement and keep the body.
- if (testCaseDoesNotReferenceUserRoles(pipe)) {
- // Even though the feature compatibility version is the last version, we should still be
- // able to create a view using new query features, because
- // internalValidateFeaturesAsPrimary is false.
- assert.commandWorked(
- testDB.createView("thirdView" + i, "coll", pipe),
- `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
- ` ${
- binVersionToFCV(
- lastVersion)} with internalValidateFeaturesAsPrimary=false`);
-
- // We should also be able to modify a view to use new query features.
- assert(testDB["thirdView" + i].drop(),
- `Drop of view with pipeline ${tojson(pipe)} failed`);
- assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
- assert.commandWorked(
- testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
- `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
- ` ${
- binVersionToFCV(
- lastVersion)} with internalValidateFeaturesAsPrimary=false`);
- }
+ // Even though the feature compatibility version is the last version, we should still be
+ // able to create a view using new query features, because
+ // internalValidateFeaturesAsPrimary is false.
+ assert.commandWorked(
+ testDB.createView("thirdView" + i, "coll", pipe),
+ `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` +
+ ` ${binVersionToFCV(lastVersion)} with internalValidateFeaturesAsPrimary=false`);
+
+ // We should also be able to modify a view to use new query features.
+ assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`);
+ assert.commandWorked(testDB.createView("thirdView" + i, "coll", []));
+ assert.commandWorked(
+ testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}),
+ `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` +
+ ` ${binVersionToFCV(lastVersion)} with internalValidateFeaturesAsPrimary=false`);
});
MongoRunner.stopMongod(conn);
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index a46fbe8439a4f..d1d4e8e2bb8a2 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -300,7 +300,7 @@ function DataGenerator() {
}
// Data we are using as a source for our testing
- testData = [
+ let testData = [
GenFlatObjectAllTypesHardCoded(),
GenFlatObjectAllTypes(0),
GenFlatObjectAllTypes(2),
@@ -398,7 +398,7 @@ function IndexDataGenerator(options) {
// Find the character (index into keyChars) that we currently have at this position, set
// this position to the next character in the keyChars sequence
- keyCharsIndex = keyChars.search(currentKey[currentKeyIndex]);
+ var keyCharsIndex = keyChars.search(currentKey[currentKeyIndex]);
currentKey = setCharAt(
currentKey, currentKeyIndex, keyChars[(keyCharsIndex + 1) % keyChars.length]);
currentKeyIndex = currentKeyIndex + 1;
@@ -519,7 +519,7 @@ function IndexDataGenerator(options) {
return GenIndexOptions(seed);
}
- testIndexes = [
+ let testIndexes = [
// Single Field Indexes
{"spec": GenSingleFieldIndex(1), "options": GenIndexOptions(0)},
{"spec": GenSingleFieldIndex(0), "options": GenIndexOptions(1)},
diff --git a/jstests/multiVersion/libs/initial_sync.js b/jstests/multiVersion/libs/initial_sync.js
index 329602f0c4b34..a1d3fc63c6f61 100644
--- a/jstests/multiVersion/libs/initial_sync.js
+++ b/jstests/multiVersion/libs/initial_sync.js
@@ -1,8 +1,8 @@
'use strict';
-load("./jstests/multiVersion/libs/multi_rs.js");
-load("./jstests/replsets/rslib.js");
+load("jstests/multiVersion/libs/multi_rs.js");
+load("jstests/replsets/rslib.js");
/**
* Test that starts up a replica set with 2 nodes of version 'replSetVersion', inserts some data,
@@ -47,4 +47,4 @@ var multversionInitialSyncTest = function(
rst.awaitSecondaryNodes();
rst.stopSet();
-};
\ No newline at end of file
+};
diff --git a/jstests/multiVersion/libs/multiversion_rollback.js b/jstests/multiVersion/libs/multiversion_rollback.js
index 37a7abadbfb38..d8e0419c40420 100644
--- a/jstests/multiVersion/libs/multiversion_rollback.js
+++ b/jstests/multiVersion/libs/multiversion_rollback.js
@@ -11,7 +11,6 @@ load("jstests/replsets/libs/rollback_test.js");
load("jstests/libs/collection_drop_recreate.js");
load('jstests/libs/parallel_shell_helpers.js');
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/feature_flag_util.js");
function printFCVDoc(nodeAdminDB, logMessage) {
const fcvDoc = nodeAdminDB.system.version.findOne({_id: 'featureCompatibilityVersion'});
@@ -164,11 +163,7 @@ function testMultiversionRollbackLatestFromDowngrading(testName, upgradeImmediat
printFCVDoc(newPrimaryAdminDB, "New primary's FCV after rolling back: ");
checkFCV(newPrimaryAdminDB, lastLTSFCV, lastLTSFCV);
- if (upgradeImmediately &&
- FeatureFlagUtil.isEnabled(newPrimaryAdminDB,
- "DowngradingToUpgrading",
- null /* user not specified */,
- true /* ignores FCV */)) {
+ if (upgradeImmediately) {
// We can upgrade immediately.
assert.commandWorked(newPrimary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
diff --git a/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js b/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js
new file mode 100644
index 0000000000000..f2b98b37c1bc6
--- /dev/null
+++ b/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js
@@ -0,0 +1,66 @@
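+/**
+ * Shared helpers for the multiversion upgrade/downgrade cluster tests.
+ */
+
+// Runs basic CRUD operations (insert, update, findOne, remove) against the given database and
+// verifies the results.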
+var testCRUDAndAgg = function(db) {
+ assert.commandWorked(db.foo.insert({x: 1}));
+ assert.commandWorked(db.foo.insert({x: -1}));
+ assert.commandWorked(db.foo.update({x: 1}, {$set: {y: 1}}));
+ assert.commandWorked(db.foo.update({x: -1}, {$set: {y: 1}}));
+ var doc1 = db.foo.findOne({x: 1});
+ assert.eq(1, doc1.y);
+ var doc2 = db.foo.findOne({x: -1});
+ assert.eq(1, doc2.y);
+
+ assert.commandWorked(db.foo.remove({x: 1}, true));
+ assert.commandWorked(db.foo.remove({x: -1}, true));
+ assert.eq(null, db.foo.findOne());
+};
+
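+// Exercises sharding DDL operations (moveChunk, shardCollection, renameCollection, drop,
+// movePrimary) through mongos against the "sharded" database and verifies the config metadata
+// after each operation.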
+var testDDLOps = function(st) {
+ var shard0Name = st.shard0.shardName;
+ var shard1Name = st.shard1.shardName;
+ var db = st.s.getDB("sharded");
+ var configDB = st.s.getDB("config");
+ assert.commandWorked(db.foo.insert({x: 1}));
+
+ // moveChunk
+ var shard0NumChunks = configDB.chunks.find({shard: shard0Name}).toArray().length;
+ var shard1NumChunks = configDB.chunks.find({shard: shard1Name}).toArray().length;
+
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: "sharded.foo", find: {x: 1}, to: shard0Name}));
+
+ var newShard0NumChunks = configDB.chunks.find({shard: shard0Name}).toArray().length;
+ var newShard1NumChunks = configDB.chunks.find({shard: shard1Name}).toArray().length;
+ assert.eq(newShard0NumChunks, shard0NumChunks + 1);
+ assert.eq(newShard1NumChunks, shard1NumChunks - 1);
+
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: "sharded.foo", find: {x: 1}, to: shard1Name}));
+
+ // shardCollection
+ assert.eq(null, configDB.collections.findOne({_id: "sharded.apple"}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: "sharded.apple", key: {_id: 1}}));
+ assert.eq(1, configDB.collections.find({_id: "sharded.apple"}).toArray().length);
+
+ // renameCollection
+ assert.commandWorked(st.s.adminCommand(
+ {renameCollection: "sharded.apple", to: "sharded.pear", dropTarget: true}));
+ assert.eq(null, configDB.collections.findOne({_id: "sharded.apple"}));
+ assert.eq(1, configDB.collections.find({_id: "sharded.pear"}).toArray().length);
+
+ // drop a collection
+ assert.commandWorked(db.runCommand({drop: "pear"}));
+ assert.eq(null, configDB.collections.findOne({_id: "sharded.pear"}));
+
+ // movePrimary
+ assert(configDB.databases.findOne({_id: "sharded", primary: shard0Name}));
+
+ assert.commandWorked(st.s.adminCommand({movePrimary: "sharded", to: shard1Name}));
+ assert.eq(null, configDB.databases.findOne({_id: "sharded", primary: shard0Name}));
+ assert(configDB.databases.findOne({_id: "sharded", primary: shard1Name}));
+
+ assert.commandWorked(st.s.adminCommand({movePrimary: "sharded", to: shard0Name}));
+ assert.eq(null, configDB.databases.findOne({_id: "sharded", primary: shard1Name}));
+ assert(configDB.databases.findOne({_id: "sharded", primary: shard0Name}));
+
+ assert.commandWorked(db.foo.remove({x: 1}, true));
+ assert.eq(null, db.foo.findOne());
+};
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js
deleted file mode 100644
index 99b465d3bbfe7..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Tests that version upgrade creates the TTL indexes for config.sampledQueries and
- * config.sampledQueriesDiff.
- *
- * @tags: [requires_fcv_70]
- */
-
-(function() {
-"use strict";
-
-load('./jstests/multiVersion/libs/multi_rs.js');
-load('./jstests/multiVersion/libs/multi_cluster.js');
-load("./jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");
-
-/**
- * Verifies that a proper TTL index exists for the query sample collection
- */
-function assertTTLIndexExists(node, collName, indexName) {
- const configDB = node.getDB("config");
- let foundIndexSpec = undefined;
- assert.soon(() => {
- const indexSpecs =
- assert.commandWorked(configDB.runCommand({"listIndexes": collName})).cursor.firstBatch;
- for (var i = 0; i < indexSpecs.length; ++i) {
- if (indexSpecs[i].name == indexName) {
- foundIndexSpec = indexSpecs[i];
- return true;
- }
- }
- return false;
- });
- assert.eq(foundIndexSpec.key, {"expireAt": 1});
- assert.eq(foundIndexSpec.expireAfterSeconds, 0);
-}
-
-function assertTTLIndexesExist(node) {
- assertTTLIndexExists(node, "sampledQueries", "SampledQueriesTTLIndex");
- assertTTLIndexExists(node, "sampledQueriesDiff", "SampledQueriesDiffTTLIndex");
- assertTTLIndexExists(node, "analyzeShardKeySplitPoints", "AnalyzeShardKeySplitPointsTTLIndex");
-}
-
-for (let oldVersion of ["last-lts", "last-continuous"]) {
- jsTest.log("Start testing with version " + oldVersion);
- var st = new ShardingTest({
- shards: 1,
- rs: {nodes: 2},
- mongos: 1,
- other: {
- mongosOptions: {binVersion: oldVersion},
- configOptions: {binVersion: oldVersion},
- shardOptions: {binVersion: oldVersion},
- rsOptions: {binVersion: oldVersion}
- }
- });
- st.configRS.awaitReplication();
-
- //////// Upgrade to latest
-
- // Upgrade the config servers
- jsTest.log('upgrading config servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- // Upgrade the shards
- jsTest.log('upgrading shard servers');
- st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
- awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- assertTTLIndexesExist(st.rs0.getPrimary());
-
- // Upgrade mongos
- jsTest.log('upgrading mongos servers');
- st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- assertTTLIndexesExist(st.rs0.getPrimary());
-
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- var clusterID = version.clusterId;
- assert.eq(clusterID, version.clusterId);
-
- //////// Downgrade back
-
- jsTest.log('downgrading mongos servers');
- st.downgradeCluster(oldVersion, {downgradeConfigs: false, downgradeShards: false});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- assertTTLIndexesExist(st.rs0.getPrimary());
-
- jsTest.log('downgrading shard servers');
- st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false});
- awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- for (let conn of [st.rs0.getPrimary(), st.rs0.getSecondary()]) {
- assertTTLIndexesExist(conn);
- }
-
- jsTest.log('downgrading config servers');
- st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false});
- // Restart mongos to clear all cache and force it to do remote calls.
- st.restartMongoses();
-
- // Check that version document is unmodified.
- version = st.s.getCollection('config.version').findOne();
- assert.eq(clusterID, version.clusterId);
-
- jsTest.log("End testing with version " + oldVersion);
- st.stop();
-}
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js
deleted file mode 100644
index d3bd581397430..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Verifies a config server cannot downgrade with a collection with changeStreamPreAndPostImages
- * enabled.
- *
- * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard]
- */
-(function() {
-"use strict";
-
-const st = new ShardingTest({config: 1, shards: 1});
-
-// A collection on a shard with changeStreamPreAndPostImages shouldn't impact downgrade.
-const validShardNS = "foo.bar";
-assert.commandWorked(st.s.getCollection(validShardNS).insert({x: 1}));
-assert.commandWorked(
- st.s.getDB("foo").runCommand({collMod: "bar", changeStreamPreAndPostImages: {enabled: true}}));
-
-// A collection on the config server with changeStreamPreAndPostImages should prevent downgrade. The
-// config server can only downgrade when in dedicated mode and in this mode the only user
-// accessible collections on it are in the config and admin databases, which never allow this
-// option, so we have to create a collection on a separate db via direct connection.
-const directConfigNS = "directDB.onConfig";
-assert.commandWorked(st.configRS.getPrimary().getCollection(directConfigNS).insert({x: 1}));
-assert.commandWorked(st.configRS.getPrimary().getDB("directDB").runCommand({
- collMod: "onConfig",
- changeStreamPreAndPostImages: {enabled: true}
-}));
-
-assert.commandFailedWithCode(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
-// Unset the option on the config server collection and now the config server can downgrade.
-assert.commandWorked(st.configRS.getPrimary().getDB("directDB").runCommand({
- collMod: "onConfig",
- changeStreamPreAndPostImages: {enabled: false}
-}));
-
-assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
-st.stop();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js
deleted file mode 100644
index 807c0de042868..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Tests that we will fail on startup if the compound wildcard indexes were not removed before we
- * downgrade from 7.0 to 'last-lts'. Downgrading FCV will allow continued use of a CWI as long as
- * the version of mongod is still 7.0, but will disallow any new creation of a CWI.
- *
- * @tags: [
- * featureFlagCompoundWildcardIndexes,
- * requires_fcv_70,
- * ]
- */
-
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-
-const dbpath = MongoRunner.dataPath + 'compound_wildcard_indexes_downgrade';
-resetDbpath(dbpath);
-
-// If we have a CWI on the admin database, we want to make sure we can startup properly despite FCV
-// not being initialized yet. It's possible to hit an invariant if featureFlag.isEnabled is called
-// without checking fcv.isVersionInitialized.
-const dbName = 'admin';
-const dbNameTest = "compound_wildcard_indexes_downgrade";
-const collName = 'compound_wildcard_indexes_downgrade';
-
-const latestVersion = "latest";
-const lastLTSVersion = "last-lts";
-
-const keyPattern = {
- "a.$**": 1,
- b: 1
-};
-
-// Startup with latest, create a compound wildcard index, stop mongod.
-{
- const conn =
- MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true});
- const db = conn.getDB(dbName);
- const coll = db[collName];
-
- assert.commandWorked(coll.createIndex(keyPattern));
-
- assert.commandWorked(coll.insert({a: {c: 1}, b: 1}));
- assert.commandWorked(coll.insert({a: 30, b: 20}));
-
- MongoRunner.stopMongod(conn);
-}
-
-// Test that we are able to restart a mongod if there exists any CWI on the 'admin' DB and the FCV
-// may not be initialized.
-{
- const conn =
- MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true});
- const db = conn.getDB(dbName);
- const coll = db[collName];
-
- // Drop the CWI for downgrading.
- assert.commandWorked(coll.dropIndex(keyPattern));
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'}));
- MongoRunner.stopMongod(conn);
-}
-
-// A normal downgrade process should drop all CWI. Now there's no CWI, we should be able to start a
-// last-lts mongod.
-{
- const conn =
- MongoRunner.runMongod({dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true});
-
- MongoRunner.stopMongod(conn);
-}
-
-// Tests on a regular database. Test that 1) FCV can be downgraded with the existence of CWI, 2)
-// continued use of CWI after FCV downgraded, 3) cannot create more CWI, and 4) a downgraded mongod
-// fails to start up if CWI is not removed.
-{
- let conn =
- MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true});
- let db = conn.getDB(dbNameTest);
- let coll = db[collName];
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '7.0'}));
-
- assert.commandWorked(coll.createIndex(keyPattern));
-
- // Test that it succeeds to downgrade the FCV with the existence of CWI, but it should fail to
- // start a mongod with the existence of a CWI.
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'}));
-
- // Test that the CWI can still be used after FCV downgraded.
- const exp = coll.find({"a.c": 1}).explain();
- const winningPlan = getWinningPlan(exp.queryPlanner);
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0, exp);
- assert.docEq(ixScans[0].indexName, "a.$**_1_b_1", ixScans);
-
- // We cannot create more CWI if FCV is below 7.0.
- assert.commandFailedWithCode(coll.createIndex({"b.$**": 1, c: 1}),
- ErrorCodes.CannotCreateIndex);
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '7.0'}));
-
- // We can create more CWI if FCV is 7.0.
- assert.commandWorked(coll.createIndex({"b.$**": 1, c: 1}));
-
- MongoRunner.stopMongod(conn);
-
- // To successfully downgrade a mongod, user must drop all CWI first.
- assert.throws(() => MongoRunner.runMongod(
- {dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true}),
- [],
- "MongoD should fail because wildcard indexes do not allow compounding");
-
- // Start a "latest" mongod and drop all indexes to successfully downgrade the mongod.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true});
- db = conn.getDB(dbNameTest);
- coll = db[collName];
- coll.dropIndexes();
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'}));
-
- MongoRunner.stopMongod(conn);
-
- // We can downgrade now as all indexes have been removed.
- conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true});
-
- MongoRunner.stopMongod(conn);
-}
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js
deleted file mode 100644
index 729289bd9a5db..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Ensure that a 6.0 version replicating a dbCheck oplog entry with the removed snapshotRead:false
- * option does not crash when a 'latest' version receives the entry.
- *
- * @tags: [
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-load('jstests/multiVersion/libs/multi_rs.js');
-
-const nodes = {
- // We want the 6.0 node to be the primary.
- n1: {binVersion: "6.0", rsConfig: {priority: 1}},
- n2: {binVersion: "latest", rsConfig: {priority: 0}},
-};
-
-const rst = new ReplSetTest({nodes: nodes});
-rst.startSet();
-rst.initiate();
-
-const dbName = "test";
-const collName = jsTestName();
-
-const primary = rst.getPrimary();
-const primaryDB = primary.getDB(dbName);
-const coll = primaryDB[collName];
-
-assert.commandWorked(coll.insert({a: 1}));
-
-// The 6.0 node will replicate the dbCheck oplog entry with the 'snapshotRead:false' option. This is
-// not supported in recent versions and should be ignored, but not cause the node to crash.
-assert.commandWorked(primaryDB.runCommand({"dbCheck": 1, snapshotRead: false}));
-
-rst.awaitReplication();
-
-function dbCheckCompleted(db) {
- return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined;
-}
-
-function forEachNode(f) {
- f(rst.getPrimary());
- f(rst.getSecondary());
-}
-
-function awaitDbCheckCompletion(db) {
- assert.soon(() => dbCheckCompleted(db), "dbCheck timed out");
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
-
- forEachNode(function(node) {
- const healthlog = node.getDB('local').system.healthlog;
- assert.soon(function() {
- return (healthlog.find({"operation": "dbCheckStop"}).itcount() == 1);
- }, "dbCheck command didn't complete");
- });
-}
-
-awaitDbCheckCompletion(primaryDB);
-
-{
- // The 6.0 primary should not report any errors.
- const healthlog = primary.getDB('local').system.healthlog;
- assert.eq(0, healthlog.find({severity: "error"}).itcount());
- assert.eq(0, healthlog.find({severity: "warning"}).itcount());
-}
-
-{
- // The latest secondary should log an error in the health log.
- const secondary = rst.getSecondary();
- const healthlog = secondary.getDB('local').system.healthlog;
- assert.eq(1, healthlog.find({severity: "error"}).itcount());
- assert.eq(0, healthlog.find({severity: "warning"}).itcount());
- const errorEntry = healthlog.findOne({severity: "error"});
- assert(errorEntry.hasOwnProperty('data'), tojson(errorEntry));
- assert.eq(false, errorEntry.data.success, tojson(errorEntry));
- assert(errorEntry.data.error.startsWith("Location6769502"), tojson(errorEntry));
-}
-
-rst.stopSet();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js
deleted file mode 100644
index c8928789d2d8b..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Tests that the cluster cannot be downgraded when range encrypted fields present
- *
- * @tags: [
- * requires_fcv_61
- * ]
- */
-
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-"use strict";
-
-const rst = new ReplSetTest({nodes: 1});
-rst.startSet();
-rst.initiate();
-rst.awaitReplication();
-
-let conn = rst.getPrimary();
-let db = conn.getDB("admin");
-
-function runTest(targetFCV) {
- assert.commandWorked(db.createCollection("basic", {
- encryptedFields: {
- "fields": [
- {
- "path": "first",
- "keyId": UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"),
- "bsonType": "int",
- "queries": {"queryType": "rangePreview", "min": 1, "max": 2, "sparsity": 1}
- },
- ]
- }
- }));
-
- let res = assert.commandFailedWithCode(
- db.adminCommand({setFeatureCompatibilityVersion: targetFCV}), ErrorCodes.CannotDowngrade);
-
- assert(db.basic.drop());
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: targetFCV}));
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-}
-
-runTest(lastLTSFCV);
-runTest(lastContinuousFCV);
-
-rst.stopSet();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js
deleted file mode 100644
index 8abc86d65ac27..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Tests that in 6.1 version listIndexes can parse invalid index specs created before 5.0 version.
- *
- * @tags: [requires_replication]
- */
-(function() {
-"use strict";
-
-load('jstests/multiVersion/libs/multi_rs.js');
-
-var nodes = {
- n1: {binVersion: "4.4"},
- n2: {binVersion: "4.4"},
-};
-
-var rst = new ReplSetTest({nodes: nodes});
-rst.startSet();
-rst.initiate();
-
-const dbName = "test";
-const collName = jsTestName();
-
-let primaryDB = rst.getPrimary().getDB(dbName);
-let primaryColl = primaryDB.getCollection(collName);
-
-// In earlier versions, users were able to add invalid index options when creating an index. The
-// option could still be interpreted accordingly.
-assert.commandWorked(primaryColl.createIndex({x: 1}, {sparse: "yes"}));
-
-// Upgrades from 4.4 to 5.0.
-jsTestLog("Upgrading to version 5.0");
-rst.upgradeSet({binVersion: "5.0"});
-assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: "5.0"}));
-
-// Upgrades from 5.0 to 6.0.
-jsTestLog("Upgrading to version last-lts");
-rst.upgradeSet({binVersion: "last-lts"});
-assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
-// Upgrades from 6.0 to latest.
-jsTestLog("Upgrading to version latest");
-rst.upgradeSet({binVersion: "latest"});
-const primary = rst.getPrimary();
-assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-primaryDB = primary.getDB(dbName);
-
-// Verify listIndexes command can correctly output the repaired index specs.
-assert.commandWorked(primaryDB.runCommand({listIndexes: collName}));
-
-// Add a new node to make sure the initial sync works correctly with the invalid index specs.
-jsTestLog("Bringing up a new node");
-rst.add();
-rst.reInitiate();
-
-jsTestLog("Waiting for new node to be synced.");
-rst.awaitReplication();
-rst.awaitSecondaryNodes();
-
-const [secondary1, secondary2] = rst.getSecondaries();
-const secondaryDB1 = secondary1.getDB(dbName);
-const secondaryDB2 = secondary2.getDB(dbName);
-
-// Verify that the existing nodes detect invalid index options, but the new node has the repaired
-// index spec.
-let validateRes = assert.commandWorked(primaryDB.runCommand({validate: collName}));
-assert(!validateRes.valid, "validate should fail: " + tojson(validateRes));
-
-validateRes = assert.commandWorked(secondaryDB1.runCommand({validate: collName}));
-assert(!validateRes.valid, "validate should fail: " + tojson(validateRes));
-
-validateRes = assert.commandWorked(secondaryDB2.runCommand({validate: collName}));
-assert(validateRes.valid, "validate should succeed: " + tojson(validateRes));
-
-// Use collMod to fix the invalid index options in the collection.
-assert.commandWorked(primaryDB.runCommand({collMod: collName}));
-
-// Fix the invalid fields from index spec.
-checkLog.containsJson(primary, 6444400, {fieldName: "sparse"});
-checkLog.containsJson(secondary1, 6444400, {fieldName: "sparse"});
-
-// Verify that the index no longer has invalid index options.
-assert.commandWorked(primaryDB.runCommand({listIndexes: collName}));
-
-validateRes = assert.commandWorked(primaryDB.runCommand({validate: collName}));
-assert(validateRes.valid, "validate should succeed: " + tojson(validateRes));
-
-validateRes = assert.commandWorked(secondaryDB1.runCommand({validate: collName}));
-assert(validateRes.valid, "validate should succeed: " + tojson(validateRes));
-
-validateRes = assert.commandWorked(secondaryDB2.runCommand({validate: collName}));
-assert(validateRes.valid, "validate should succeed: " + tojson(validateRes));
-
-rst.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js
deleted file mode 100644
index 95eb03bfeb514..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Tests that we don't fail the FCV check for partial indexes on the admin database during startup.
- */
-
-(function() {
-'use strict';
-
-const dbpath = MongoRunner.dataPath + 'partial_indexes_downgrade';
-resetDbpath(dbpath);
-
-// If we have a partial index on the admin database, we want to make sure we can startup properly
-// despite FCV not being initialized yet. It's possible to hit an invariant if featureFlag.isEnabled
-// is called without checking fcv.isVersionInitialized (see SERVER-71068 for more details).
-const dbName = 'admin';
-const collName = 'partial_indexes_downgrade';
-
-// Startup with latest, create partial index, stop mongod.
-{
- const conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: 'latest', noCleanData: true});
- const db = conn.getDB(dbName);
- const coll = db[collName];
-
- assert.commandWorked(coll.createIndex(
- {a: 1, b: 1}, {partialFilterExpression: {$or: [{a: {$lt: 20}}, {b: {$lt: 10}}]}}));
-
- assert.commandWorked(coll.insert({a: 1, b: 1}));
- assert.commandWorked(coll.insert({a: 30, b: 20}));
-
- MongoRunner.stopMongod(conn);
-}
-
-// Startup with latest again, to make sure we're not checking FCV for this index at startup.
-{
- const conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: 'latest', noCleanData: true});
- const db = conn.getDB(dbName);
- const coll = db[collName];
-
- // Make sure we are on the same db path as before.
- assert.eq(coll.aggregate().toArray().length, 2);
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
- MongoRunner.stopMongod(conn);
-}
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js
deleted file mode 100644
index 6f86dcb202c5a..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Verifies that the server ignores collection option "recordPreImages" on binary upgrade from the
- * last LTS version to the current, as well as removes the option from collection attributes on
- * FCV upgrade.
- */
-(function() {
-"use strict";
-load('jstests/multiVersion/libs/multi_rs.js');
-
-const lastLTSVersion = "last-lts";
-const latestVersion = "latest";
-
-// Setup a two-node replica set with last LTS binaries, so it is possible to create a collection
-// with "recordPreImages" option.
-const rst = new ReplSetTest(
- {name: jsTestName(), nodes: [{binVersion: lastLTSVersion}, {binVersion: lastLTSVersion}]});
-rst.startSet();
-rst.initiate();
-const testDB = rst.getPrimary().getDB("test");
-const primaryNode = rst.getPrimary();
-const secondaryNode = rst.getSecondary();
-
-// Create the collection.
-const collectionName = "coll";
-assert.commandWorked(testDB.createCollection(collectionName, {recordPreImages: true}));
-let coll = testDB[collectionName];
-
-// Insert a test document which will be updated to trigger recording of change stream pre-images.
-assert.commandWorked(coll.insert({_id: 1, a: 1}));
-assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}}));
-rst.awaitReplication();
-
-// Upgrade the binary of the secondary node to the current version to setup a mixed binary cluster.
-rst.upgradeMembers([secondaryNode], {binVersion: latestVersion});
-
-// Make sure the primary node did not change.
-rst.stepUp(primaryNode);
-
-// Verify that recording of change stream pre-images succeeds.
-assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}}));
-rst.awaitReplication();
-
-// Finally upgrade the binary of the primary node to the current version.
-rst.upgradePrimary(rst.getPrimary(), {binVersion: latestVersion});
-
-// Update a document on the collection with inactive "recordPreImages" collection option.
-coll = rst.getPrimary().getDB("test")[collectionName];
-assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}}));
-rst.awaitReplication();
-
-// Upgrade the FCV to the latest to trigger removal of "recordPreImages" collection option from
-// persistent catalog entries.
-assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
-// To check the collection options, downgrade FCV to later replace the binary of the server with
-// the last LTS binary version.
-assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-rst.upgradeSet({binVersion: lastLTSVersion});
-
-// Verify that collection option "recordPreImages" was removed.
-const result =
- assert.commandWorked(rst.getPrimary().getDB("test").runCommand({listCollections: 1}));
-assert.eq(result.cursor.firstBatch[0].name, collectionName);
-assert.docEq(
- {},
- result.cursor.firstBatch[0].options,
- `Collection option "recordPreImages" was not removed. Got response: ${tojson(result)}`);
-rst.stopSet();
-})();
\ No newline at end of file
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js
deleted file mode 100644
index 2a921cfaf1c1f..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Test to ensure that:
- * 1. The FCV cannot be downgraded to 6.0 if there are capped collections with a size
- * that is non multiple of 256 bytes.
- * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true.
- *
- * @tags: [requires_fcv_70]
- */
-(function() {
-"use strict";
-
-load('jstests/libs/collection_drop_recreate.js');
-load("jstests/libs/feature_flag_util.js");
-
-const latest = "latest";
-const dbName = "test_set_fcv_capped_collection";
-const collName = "capped_collection";
-const cappedCollOptions = {
- capped: true,
- size: 5242881,
- max: 5000,
-};
-
-function checkFCVDowngradeUpgrade(db, adminDB) {
- let runDowngradingToUpgrading = false;
- if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) {
- runDowngradingToUpgrading = true;
- }
-
- jsTest.log("Create a relaxed size capped collection and attempt to setFCV to lastLTS");
- checkFCV(adminDB, latestFCV);
- assertCreateCollection(db, collName, cappedCollOptions);
- assert.commandFailedWithCode(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
- // Check FCV is in downgrading state.
- checkFCV(adminDB, lastLTSFCV, lastLTSFCV);
-
- if (runDowngradingToUpgrading) {
- jsTest.log("Set FCV back to latest");
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
-
- // Confirm the capped collection is not affected.
- const res = db[collName].stats();
- assert.eq(res.capped, cappedCollOptions.capped);
- assert.eq(res.maxSize, cappedCollOptions.size);
- }
-
- assertDropCollection(db, collName);
-}
-
-function runStandaloneTest() {
- jsTest.log("Start Standalone test");
- const conn = MongoRunner.runMongod({binVersion: latest});
- const db = conn.getDB(dbName);
- const adminDB = conn.getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- MongoRunner.stopMongod(conn);
-}
-
-function runReplicaSetTest() {
- jsTest.log("Start Replica Set test");
- const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- const adminDB = rst.getPrimary().getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- rst.stopSet();
-}
-
-function runShardingTest() {
- jsTest.log("Start Sharding test");
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- const db = st.s.getDB(dbName);
- const adminDB = st.s.getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- st.stop();
-}
-
-runStandaloneTest();
-runReplicaSetTest();
-runShardingTest();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js
deleted file mode 100644
index 125e6c7dbd7fe..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Test to ensure that:
- * 1. The FCV cannot be downgraded to 6.0 if there are queryable range encryption indexes.
- * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true.
- *
- * @tags: [requires_fcv_70]
- */
-(function() {
-"use strict";
-
-load('jstests/libs/collection_drop_recreate.js');
-load("jstests/libs/feature_flag_util.js");
-
-const latest = "latest";
-const dbName = "test_set_fcv_encrypted_field";
-const collName = "encrypted";
-const encryptedFieldsOption = {
- encryptedFields: {
- fields: [{
- path: "firstName",
- keyId: UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"),
- bsonType: "int",
- queries: {queryType: "rangePreview", sparsity: 1, min: NumberInt(1), max: NumberInt(2)}
- }]
- }
-};
-
-function checkFCVDowngradeUpgrade(db, adminDB) {
- let runDowngradingToUpgrading = false;
- if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) {
- runDowngradingToUpgrading = true;
- }
-
- jsTest.log("Create a encrypted field collection and attempt to setFCV to lastLTS");
- checkFCV(adminDB, latestFCV);
- assertCreateCollection(db, collName, encryptedFieldsOption);
- assert.commandFailedWithCode(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
- // Check FCV is in downgrading state.
- checkFCV(adminDB, lastLTSFCV, lastLTSFCV);
-
- if (runDowngradingToUpgrading) {
- jsTest.log("Set FCV back to latest");
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
-
- // Check encryptedField is unaffected.
- const res = db.getCollectionInfos({name: collName});
- assert.eq(res[0].options.encryptedFields.fields[0].queries.queryType,
- encryptedFieldsOption.encryptedFields.fields[0].queries.queryType);
- }
-
- assertDropCollection(db, collName);
-}
-
-function runReplicaSetTest() {
- jsTest.log("Start Replica Set test");
- const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- const adminDB = rst.getPrimary().getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- rst.stopSet();
-}
-
-function runShardingTest() {
- jsTest.log("Start Sharding test");
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- const db = st.s.getDB(dbName);
- const adminDB = st.s.getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- st.stop();
-}
-
-runReplicaSetTest();
-runShardingTest();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js
deleted file mode 100644
index 1233db7478588..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Test to ensure that:
- * 1. The FCV cannot be downgraded to 6.0 if there are timeseries collections with
- * partial TTL index.
- * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true.
- *
- * @tags: [requires_fcv_70]
- */
-(function() {
-"use strict";
-
-load('jstests/libs/collection_drop_recreate.js');
-load("jstests/libs/feature_flag_util.js");
-
-const latest = "latest";
-const dbName = "test_set_fcv_partial_ttl_index";
-const collName = "timeseries";
-const timeFieldName = "tm";
-const metaFieldName = "mm";
-const timeseriesOptions = {
- timeseries: {
- timeField: timeFieldName,
- metaField: metaFieldName,
- }
-};
-const ttlIndexSpec = {
- [timeFieldName]: 1,
-};
-const ttlIndexOptions = {
- expireAfterSeconds: 3600,
- partialFilterExpression: {
- [metaFieldName]: {
- $gt: 5,
- }
- }
-};
-
-function checkFCVDowngradeUpgrade(db, adminDB) {
- let runDowngradingToUpgrading = false;
- if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) {
- runDowngradingToUpgrading = true;
- }
-
- jsTest.log(
- "Create a partial TTL index on timeseries collection and attempt to setFCV to lastLTS");
- checkFCV(adminDB, latestFCV);
- assertCreateCollection(db, collName, timeseriesOptions);
- assert.commandWorked(db[collName].createIndex(ttlIndexSpec, ttlIndexOptions));
- assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
- // Check FCV is in downgrading state.
- checkFCV(adminDB, lastLTSFCV, lastLTSFCV);
-
- if (runDowngradingToUpgrading) {
- jsTest.log("Set FCV back to latest");
- assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(adminDB, latestFCV);
-
- // Confirm the partial TTL index is not affected.
- const res = db[collName].getIndexes();
- assert.eq(res[1].expireAfterSeconds, ttlIndexOptions.expireAfterSeconds);
- assert.eq(res[1].partialFilterExpression, ttlIndexOptions.partialFilterExpression);
- }
-
- assertDropCollection(db, collName);
-}
-
-function runStandaloneTest() {
- jsTest.log("Start Standalone test");
- const conn = MongoRunner.runMongod({binVersion: latest});
- const db = conn.getDB(dbName);
- const adminDB = conn.getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- MongoRunner.stopMongod(conn);
-}
-
-function runReplicaSetTest() {
- jsTest.log("Start Replica Set test");
- const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}});
- rst.startSet();
- rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- const adminDB = rst.getPrimary().getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- rst.stopSet();
-}
-
-function runShardingTest() {
- jsTest.log("Start Sharding test");
- const st = new ShardingTest({shards: 2, mongos: 1, config: 1});
- const db = st.s.getDB(dbName);
- const adminDB = st.s.getDB("admin");
-
- checkFCVDowngradeUpgrade(db, adminDB);
-
- st.stop();
-}
-
-runStandaloneTest();
-runReplicaSetTest();
-runShardingTest();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js
deleted file mode 100644
index 10e08d3b98c27..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Test to ensure that:
- * 1. The FCV cannot be downgraded to a version that does not have catalog shards if catalog
- * shard is enabled.
- * 2. If the FCV does get downgraded to a version that does not support catalog shards, a
- * catalog shard cannot be created (this can occur if an FCV downgrade happens concurrently
- * with the creation of a catalog shard).
- *
- * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard]
- */
-(function() {
-"use strict";
-
-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
-load("jstests/libs/catalog_shard_util.js");
-
-const shardedNs = "foo.bar";
-const unshardedNs = "unsharded_foo.unsharded_bar";
-
-function basicCRUD(conn, ns) {
- assert.commandWorked(
- conn.getCollection(ns).insert([{_id: 1, x: 1, skey: -1000}, {_id: 2, skey: 1000}]));
- assert.sameMembers(conn.getCollection(ns).find().toArray(),
- [{_id: 1, x: 1, skey: -1000}, {_id: 2, skey: 1000}]);
- assert.commandWorked(conn.getCollection(ns).remove({x: 1}));
- assert.commandWorked(conn.getCollection(ns).remove({skey: 1000}));
- assert.eq(conn.getCollection(ns).find().toArray().length, 0);
-}
-
-let splitPoint = 0;
-function basicShardedDDL(conn, ns) {
- assert.commandWorked(conn.adminCommand({split: ns, middle: {skey: splitPoint}}));
- splitPoint += 10;
-}
-
-const st = new ShardingTest({shards: 2, catalogShard: true, other: {enableBalancer: true}});
-const mongosAdminDB = st.s.getDB("admin");
-
-assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {skey: 1}}));
-
-function runTest(targetFCV) {
- jsTest.log("Downgrading FCV to an unsupported version when catalogShard is enabled.");
-
- const errRes = assert.commandFailedWithCode(
- mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV}),
- ErrorCodes.CannotDowngrade);
- assert.eq(errRes.errmsg,
- `Cannot downgrade featureCompatibilityVersion to ${targetFCV} with a catalog shard as it is not supported in earlier versions. Please transition the config server to dedicated mode using the transitionToDedicatedConfigServer command.`);
-
- // The downgrade fails and should not start the downgrade process on any cluster node.
- const configRes =
- st.config0.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1});
- assert(configRes.featureCompatibilityVersion);
- assert.eq(configRes.featureCompatibilityVersion.version, latestFCV);
-
- const shardRes =
- st.shard1.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1});
- assert(shardRes.featureCompatibilityVersion);
- assert.eq(shardRes.featureCompatibilityVersion.version, latestFCV);
-
- // The catalog shard's data can still be accessed.
- basicCRUD(st.s, shardedNs);
- basicShardedDDL(st.s, shardedNs);
- basicCRUD(st.s, unshardedNs);
-
- // Remove the catalog shard and verify we can now downgrade.
- CatalogShardUtil.transitionToDedicatedConfigServer(st);
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV}));
-
- jsTest.log("Attempting to create a catalogShard on an unsupported FCV.");
-
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV}));
- assert.commandFailedWithCode(mongosAdminDB.runCommand({transitionToCatalogShard: 1}), 7467202);
-
- // Upgrade and transition back to catalog shard mode for the next test.
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
- assert.commandWorked(mongosAdminDB.runCommand({transitionToCatalogShard: 1}));
-
- basicCRUD(st.s, shardedNs);
- basicShardedDDL(st.s, shardedNs);
- basicCRUD(st.s, unshardedNs);
-}
-
-runTest(lastLTSFCV);
-runTest(lastContinuousFCV);
-
-st.stop();
-})();
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js
deleted file mode 100644
index a6bca60df1a38..0000000000000
--- a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Tests that time-series measurement indexes can be created in FCV 6.0.
- */
-(function() {
-"use strict";
-
-const rst = new ReplSetTest({nodes: 1});
-rst.startSet();
-rst.initiate();
-
-const primary = rst.getPrimary();
-
-const dbName = "test";
-const collName = "coll";
-
-const db = primary.getDB(dbName);
-
-assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "t", metaField: "m"}}));
-assert.commandWorked(db.coll.insert({t: ISODate(), m: 1}));
-assert.commandWorked(db.coll.createIndex({a: 1, t: 1}));
-
-rst.stopSet();
-}());
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js
new file mode 100644
index 0000000000000..62a7a6a16ac17
--- /dev/null
+++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js
@@ -0,0 +1,86 @@
+/**
+ * Tests that $out errors when trying to write to time-series collections on older server versions.
+ * $out with the 'timeseries' option should only succeed if the FCV >= 7.1.
+ */
+
+(function() {
+"use strict";
+
+load('jstests/multiVersion/libs/multi_cluster.js');  // For upgradeCluster.
+
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 2},
+ mongos: 1,
+ other: {
+ mongosOptions: {binVersion: "last-lts"},
+ configOptions: {binVersion: "last-lts"},
+ shardOptions: {binVersion: "last-lts"},
+ rsOptions: {binVersion: "last-lts"}
+ }
+});
+st.configRS.awaitReplication();
+
+const dbName = "test";
+const testDB = st.s.getDB(dbName);
+let coll = testDB["coll"];
+let tColl = testDB["timeseries"];
+coll.drop();
+tColl.drop();
+
+// set up a source collection and a time-series target collection.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
+assert.commandWorked(coll.insert({t: ISODate(), m: 1}));
+assert.commandWorked(testDB.createCollection(tColl.getName(), {timeseries: {timeField: "t"}}));
+assert.commandWorked(tColl.insert({t: ISODate(), m: 1}));
+
+// assert aggregate succeeds with no 'timeseries' option.
+let pipeline = [{$out: "out"}];
+assert.doesNotThrow(() => coll.aggregate(pipeline));
+assert.eq(1, testDB["out"].find().itcount());
+
+// assert aggregate fails with the original error with the 'timeseries' option.
+pipeline = [{$out: {coll: "out_time", db: dbName, timeseries: {timeField: "t"}}}];
+assert.throwsWithCode(() => coll.aggregate(pipeline), 16994);
+
+// assert aggregate fails if trying to write to a time-series collection without the 'timeseries'
+// option.
+let replacePipeline = [{$out: tColl.getName()}];
+assert.throwsWithCode(() => coll.aggregate(replacePipeline), ErrorCodes.InvalidOptions);
+
+// upgrade the shards.
+jsTestLog('upgrading the shards.');
+st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false});
+awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true});
+// assert aggregate fails with the original error with the 'timeseries' option.
+assert.throwsWithCode(() => coll.aggregate(pipeline), 16994);
+// assert aggregate fails if trying to write to a time-series collection without the 'timeseries'
+// option.
+assert.throwsWithCode(() => coll.aggregate(replacePipeline), 7406100);
+
+// upgrade the config server and mongos.
+jsTestLog('upgrading the config server and mongos.');
+st.upgradeCluster("latest", {upgradeShards: false, upgradeMongos: true, upgradeConfigs: true});
+let mongosConn = st.s;
+coll = mongosConn.getDB(dbName)["coll"];
+// assert aggregate fails with an updated error with the 'timeseries' option.
+assert.throwsWithCode(() => coll.aggregate(pipeline), 7406100); // new error code.
+// assert aggregate fails if trying to write to a time-series collection without the 'timeseries'
+// option.
+assert.throwsWithCode(() => coll.aggregate(replacePipeline), 7406100);
+
+// upgrade the FCV.
+jsTestLog('upgrading the FCV.');
+assert.commandWorked(mongosConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+// assert aggregate with 'timeseries' succeeds.
+assert.doesNotThrow(() => coll.aggregate(pipeline));
+let resultColl = mongosConn.getDB(dbName)["out_time"];
+assert.eq(1, resultColl.find().itcount());
+
+// assert aggregate replacing a time-series collection without 'timeseries' succeeds.
+assert.doesNotThrow(() => coll.aggregate(replacePipeline));
+resultColl = mongosConn.getDB(dbName)["timeseries"];
+assert.eq(1, resultColl.find().itcount());
+
+st.stop();
+}());
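The FCV gate described in the header comment above is the crux of this new test. As a point of reference, here is a minimal sketch of how a test can read the effective FCV before exercising the gated $out behavior; `conn` is an assumed connection to any node, and `latestFCV`/`lastLTSFCV` are globals provided by the shell test harness:

// Read the node's current feature compatibility version via getParameter.
const fcvRes = assert.commandWorked(
    conn.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1}));
assert(fcvRes.featureCompatibilityVersion);
jsTestLog("Current FCV: " + fcvRes.featureCompatibilityVersion.version);
// $out with the 'timeseries' option is only expected to succeed once this reports 7.1 or later.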
diff --git a/jstests/noPassthrough/agg_collstats_expr.js b/jstests/noPassthrough/agg_collstats_expr.js
index 45f98af720d9c..3d61cdde9d262 100644
--- a/jstests/noPassthrough/agg_collstats_expr.js
+++ b/jstests/noPassthrough/agg_collstats_expr.js
@@ -25,10 +25,7 @@ function getShardCount(counts, shardName) {
* on the i-th shard or no chunks assigned to that shard if shardDistribution[i] is null.
*/
function runShardingTestExists(shardDistribution) {
- const st = ShardingTest({
- shards: shardDistribution.length,
- setParameter: {receiveChunkWaitForRangeDeleterTimeoutMS: 90000}
- });
+ const st = ShardingTest({shards: shardDistribution.length});
const mongos = st.s0;
const admin = mongos.getDB("admin");
diff --git a/jstests/noPassthrough/agg_group.js b/jstests/noPassthrough/agg_group.js
index 654de425e79f1..e19f0cd7308d9 100644
--- a/jstests/noPassthrough/agg_group.js
+++ b/jstests/noPassthrough/agg_group.js
@@ -11,11 +11,6 @@
// partial aggregation results in a special format to the mongos.
//
// @tags: [requires_sharding]
-(function() {
-'use strict';
-
-load("jstests/libs/analyze_plan.js");
-
const st = new ShardingTest({config: 1, shards: 1});
// This database name can provide multiple similar test cases with a good separate namespace and
@@ -108,5 +103,4 @@ assertShardedGroupResultsMatch(coll, [{$group: {_id: "$item", a: {$avg: "$price"
// Verifies that SBE group pushdown with sharded $avg works for missing data.
assertShardedGroupResultsMatch(coll, [{$group: {_id: "$item", a: {$avg: "$missing"}}}]);
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/aggregation_out_on_secondary.js b/jstests/noPassthrough/aggregation_out_on_secondary.js
index 2279b9f9f10ae..68862925078e1 100644
--- a/jstests/noPassthrough/aggregation_out_on_secondary.js
+++ b/jstests/noPassthrough/aggregation_out_on_secondary.js
@@ -68,4 +68,4 @@ const primaryProfile =
assert.eq(1, primaryProfile);
rs.stopSet();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/analyze_command.js b/jstests/noPassthrough/analyze_command.js
index 122bca9fc1338..f702bddf1bc47 100644
--- a/jstests/noPassthrough/analyze_command.js
+++ b/jstests/noPassthrough/analyze_command.js
@@ -1,7 +1,4 @@
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}});
assert.neq(null, conn, "mongod was unable to start up");
@@ -11,11 +8,11 @@ const db = conn.getDB(jsTestName());
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
const coll = db.cqf_analyze;
const syscoll = db.system.statistics.cqf_analyze;
@@ -179,4 +176,3 @@ assert.eq(100, syscoll.find({_id: "a"})[0].statistics.scalarHistogram.buckets.le
cleanup();
MongoRunner.stopMongod(conn);
-}());
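The analyze_command.js hunk above is one instance of a repo-wide conversion from legacy IIFE-style tests to ES modules. A consolidated sketch of the shape these conversions take, using the SBE helper from this hunk (the test body itself is elided):

// ES-module style: import helpers directly instead of load()'ing them into the global scope.
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
const db = conn.getDB(jsTestName());

if (!checkSBEEnabled(db)) {
    jsTestLog("Skipping test because SBE is not enabled");
    MongoRunner.stopMongod(conn);
    quit();  // modules have no wrapping IIFE, so an early 'return' becomes quit()
}

// ... test body ...
MongoRunner.stopMongod(conn);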
diff --git a/jstests/noPassthrough/and_hash.js b/jstests/noPassthrough/and_hash.js
index ba96d11ff16a1..7c7b27b45ccaa 100644
--- a/jstests/noPassthrough/and_hash.js
+++ b/jstests/noPassthrough/and_hash.js
@@ -1,9 +1,6 @@
// Tests for whether the query solution correctly used an AND_HASH for index intersection.
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/analyze_plan.js"); // For planHasStage helper to analyze explain() output.
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -136,5 +133,4 @@ assertAndHashUsed({
shouldUseAndHash: true
});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/and_sorted.js b/jstests/noPassthrough/and_sorted.js
index d5b64cbda8ec2..137cd8393c118 100644
--- a/jstests/noPassthrough/and_sorted.js
+++ b/jstests/noPassthrough/and_sorted.js
@@ -1,9 +1,6 @@
// Tests for whether the query solution correctly used an AND_SORTED stage for index intersection.
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/analyze_plan.js"); // For planHasStage helper to analyze explain() output.
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -151,5 +148,4 @@ runAndSortedTests();
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
runAndSortedTests();
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/arithmetic_expression_constant_folding.js b/jstests/noPassthrough/arithmetic_expression_constant_folding.js
index da298ab843429..14319ba2a8c57 100644
--- a/jstests/noPassthrough/arithmetic_expression_constant_folding.js
+++ b/jstests/noPassthrough/arithmetic_expression_constant_folding.js
@@ -89,16 +89,9 @@ function runRandomizedPropertyTest({op, min, max}) {
assertPipelineCorrect(pipeline, v);
}
-// TODO: SERVER-67282 Randomized property testing should work after SBE is updated to match classic
-// engine, so remove this setParameter. When this knob is removed from this test, move this test
-// into jstests/aggregation/expressions/arithmetic_constant_folding.js.
-testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceClassicEngine"});
for (let i = 0; i < 5; i++) {
runRandomizedPropertyTest({op: "$add", min: -314159255, max: 314159255});
runRandomizedPropertyTest({op: "$multiply", min: -31415, max: 31415});
}
-// TODO: SERVER-67282 Randomized property testing should work after SBE is updated to match classic
-// engine, so remove this setParameter.
-testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "trySbeEngine"});
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js
index cdc7e336c9149..1e58be4eadd86 100644
--- a/jstests/noPassthrough/atomic_rename_collection.js
+++ b/jstests/noPassthrough/atomic_rename_collection.js
@@ -41,7 +41,7 @@ tests.forEach((test) => {
dropTarget: true
};
assert.commandWorked(local.adminCommand(cmd), tojson(cmd));
- ops =
+ let ops =
local.oplog.rs.find({ts: {$gt: ts}, ns: {'$regex': dbregex}}).sort({$natural: 1}).toArray();
assert.eq(ops.length,
test.expectedOplogEntries,
diff --git a/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js b/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js
index 534abcf611eea..b8227d0267713 100644
--- a/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js
+++ b/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js
@@ -11,15 +11,6 @@
load("jstests/replsets/rslib.js");
-function waitAllNodesHaveConfig(replTest, config) {
- replTest.nodes.forEach(function(node) {
- assert.soon(function() {
- const nodeConfig = replTest.getReplSetConfigFromNode(node.nodeId);
- return isSameConfigContent(config, nodeConfig);
- });
- });
-}
-
// Make secondaries unelectable. Add 7 voting nodes, which is the maximum allowed.
const replTest = new ReplSetTest({
nodes: [
diff --git a/jstests/noPassthrough/auto_safe_reconfig_helpers.js b/jstests/noPassthrough/auto_safe_reconfig_helpers.js
index fe3e8b7b62531..812fc16cdc9b0 100644
--- a/jstests/noPassthrough/auto_safe_reconfig_helpers.js
+++ b/jstests/noPassthrough/auto_safe_reconfig_helpers.js
@@ -185,5 +185,8 @@ assertSameConfigContent(replTest.getReplSetConfigFromNode(), config);
// Restore the original config before shutting down.
reconfig(replTest, origConfig);
+// There is a chance that some nodes haven't finished reconfig, if we directly call stopSet, those
+// nodes may fail to answer certain commands and fail the test.
+waitAllNodesHaveConfig(replTest, config);
replTest.stopSet();
})();
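The new waitAllNodesHaveConfig() call above matches the helper removed from auto_safe_reconfig_helper_max_voting_nodes.js earlier in this patch, so it presumably now lives in a shared replica-set library such as jstests/replsets/rslib.js, which these tests load. For reference, a sketch of that helper as it appeared before the move:

function waitAllNodesHaveConfig(replTest, config) {
    replTest.nodes.forEach(function(node) {
        // Poll each node until its locally installed config matches the expected content.
        assert.soon(function() {
            const nodeConfig = replTest.getReplSetConfigFromNode(node.nodeId);
            return isSameConfigContent(config, nodeConfig);  // comparison helper from rslib.js
        });
    });
}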
diff --git a/jstests/noPassthrough/background_validation_checkpoint_existence.js b/jstests/noPassthrough/background_validation_checkpoint_existence.js
index 438278b9aeaec..f3cf1afc450ef 100644
--- a/jstests/noPassthrough/background_validation_checkpoint_existence.js
+++ b/jstests/noPassthrough/background_validation_checkpoint_existence.js
@@ -58,4 +58,4 @@ assert.eq(true, res.valid, res);
assert.eq(2, res.nIndexes, res);
MongoRunner.stopMongod(conn);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/batched_multi_deletes.js b/jstests/noPassthrough/batched_multi_deletes.js
index 09d1482f0b2c9..ed53fe438437f 100644
--- a/jstests/noPassthrough/batched_multi_deletes.js
+++ b/jstests/noPassthrough/batched_multi_deletes.js
@@ -7,9 +7,7 @@
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
function validateBatchedDeletes(conn) {
const db = conn.getDB("test");
@@ -91,5 +89,4 @@ function validateBatchedDeletes(conn) {
rst.awaitNodesAgreeOnPrimary();
validateBatchedDeletes(rst.getPrimary());
rst.stopSet();
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js
new file mode 100644
index 0000000000000..3fc2bebc22802
--- /dev/null
+++ b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js
@@ -0,0 +1,58 @@
+/**
+ * Tests batched deletes with 'wiredTigerCursorCacheSize=0', to see if there are any use-after-free
+ * bugs due to cursor lifetime. This test is only expected to catch regressions in ASAN variants.
+ *
+ * @tags: [
+ * does_not_support_transactions,
+ * exclude_from_large_txns,
+ * requires_sharding,
+ * ]
+ */
+(function() {
+"use strict";
+
+if (!_isAddressSanitizerActive()) {
+ jsTestLog("Skipping " + jsTestName() + " because address sanitizer is not active.");
+}
+
+load("jstests/libs/fail_point_util.js"); // For 'configureFailPoint()'
+load("jstests/libs/parallelTester.js"); // For 'startParallelShell()'
+
+var st =
+ new ShardingTest({shards: 1, rs: {nodes: 1, setParameter: {wiredTigerCursorCacheSize: 0}}});
+
+const primary = st.s0;
+const rsPrimary = st.rs0.getPrimary();
+const db = primary.getDB('test');
+const coll = db.test;
+
+assert.commandWorked(primary.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
+
+const docIds = Array.from(Array(10).keys());
+assert.commandWorked(coll.insert(docIds.map((x) => {
+ return {_id: x, x: x};
+})));
+
+const throwWriteConflictExceptionInBatchedDeleteStage =
+ configureFailPoint(rsPrimary, "throwWriteConflictExceptionInBatchedDeleteStage");
+
+function performBatchedDelete() {
+ const testDB = db.getMongo().getDB("test");
+ const coll = testDB.test;
+ const result = assert.commandWorked(coll.remove({x: {$gte: 0}}));
+ jsTestLog('delete result: ' + tojson(result));
+}
+
+const awaitBatchedDelete = startParallelShell(performBatchedDelete, primary.port);
+
+throwWriteConflictExceptionInBatchedDeleteStage.wait();
+
+jsTestLog("update documents");
+assert.commandWorked(coll.update({}, {$inc: {x: -docIds.length}}));
+
+throwWriteConflictExceptionInBatchedDeleteStage.off();
+
+awaitBatchedDelete();
+
+st.stop();
+})();
diff --git a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
index 09728147bec8e..7c94520478fea 100644
--- a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
+++ b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js
@@ -6,14 +6,11 @@
* handled by the primary.
*
* @tags: [
- * requires_fcv_62,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/feature_flag_util.js"); // for FeatureFlagUtil.isEnabled
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const rst = new ReplSetTest({
nodes: [
@@ -47,7 +44,7 @@ assert.commandWorked(coll.insert(docIds.map((x) => {
// Set up server to split deletes over multiple oplog entries
// such that each oplog entry contains two delete operations.
-if (!FeatureFlagUtil.isEnabled(db, "InternalWritesAreReplicatedTransactionally")) {
+if (!FeatureFlagUtil.isEnabled(db, "LargeBatchedOperations")) {
// Confirm legacy server behavior where mutiple oplog entries are not allowed
// for batched writes.
const result =
@@ -61,7 +58,7 @@ if (!FeatureFlagUtil.isEnabled(db, "InternalWritesAreReplicatedTransactionally")
// Stop test and return early. The rest of the test will test the new multiple oplog entry
// behavior.
rst.stopSet();
- return;
+ quit();
}
// This document removal request will be replicated over two applyOps oplog entries,
@@ -96,5 +93,4 @@ assert(ops[1].hasOwnProperty('prevOpTime'));
assert.eq(ops[0].prevOpTime.ts, ops[1].ts);
assert.eq(ops[1].prevOpTime.ts, Timestamp());
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/bson_max_limit.js b/jstests/noPassthrough/bson_max_limit.js
index 60ad231ee5184..1b60ce8300dd8 100644
--- a/jstests/noPassthrough/bson_max_limit.js
+++ b/jstests/noPassthrough/bson_max_limit.js
@@ -83,26 +83,4 @@ function executeTest(db) {
executeTest(conn.getDB("test"));
MongoRunner.stopMongod(conn);
}
-
-{
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- }
- ]
- });
- rst.startSet();
- rst.initiate();
- // Test the modern default behavior where storeFindAndModifyImagesInSideCollection is true.
- rst.getPrimary().adminCommand(
- {setParameter: 1, storeFindAndModifyImagesInSideCollection: true});
- executeTest(rst.getPrimary().getDB("test"));
- rst.stopSet();
-}
})();
diff --git a/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js b/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js
index 267a12cc34a4a..5206d6f91c07b 100644
--- a/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js
+++ b/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js
@@ -3,15 +3,12 @@
// We check that the results are correct, the documents are sorted, and the documents we expect to
// appear, appear.
// Note: events in buckets that exceed bucketMaxSpan are not included.
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const dbName = jsTestName();
// Start a single mongoD using MongoRunner.
-const conn = MongoRunner.runMongod({setParameter: "featureFlagBucketUnpackWithSort=true"});
+const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
// Create the test DB and collection.
@@ -21,13 +18,6 @@ const collName = dbName;
const coll = db[collName];
const minsToMillis = (mins) => mins * 60 * 1000;
-if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) {
- jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled.");
- return;
-}
-
-printjson(conn.adminCommand({getParameter: 1, featureFlagBucketUnpackWithSort: 1}));
-
const on = "alwaysOn";
const off = "off";
@@ -103,4 +93,3 @@ let resOpt = mergeShellOptimized();
assert(resOpt == 0);
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/can_load_ttl_index_capped_collection.js b/jstests/noPassthrough/can_load_ttl_index_capped_collection.js
index 0e0c47c6ed873..266746c5f1718 100644
--- a/jstests/noPassthrough/can_load_ttl_index_capped_collection.js
+++ b/jstests/noPassthrough/can_load_ttl_index_capped_collection.js
@@ -47,4 +47,4 @@ assert.eq(
assert.eq(indexes[2].expireAfterSeconds, 10, "Index is not TTL as expected: " + tojson(indexes));
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/capped_collections_downgrade.js b/jstests/noPassthrough/capped_collections_downgrade.js
deleted file mode 100644
index 53a276f0c123a..0000000000000
--- a/jstests/noPassthrough/capped_collections_downgrade.js
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Tests that the cluster cannot be downgraded when there are capped collections with a size that
- * is non multiple of 256 bytes. The user has to resize or drop the collection in order to
- * downgrade.
- */
-(function() {
-
-const conn = MongoRunner.runMongod();
-const testDB = conn.getDB(jsTestName());
-const cappedColl = testDB["capped_coll"];
-const options = Object.assign({}, {capped: true}, {size: 50 * 1023});
-testDB.createCollection(cappedColl.getName(), options);
-
-// We expect the server to be in a non-downgradable state initially and "command" is what we have to
-// run to correct the state in order to successfully downgrade.
-function checkCappedCollectionForDowngrade(command) {
- assert.commandFailedWithCode(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
- testDB.runCommand(command);
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
- assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-}
-
-// We want to resize the collection to have a size multiple of 256 bytes in order to be able to
-// downgrade.
-const resizeCommand = Object.assign({}, {collMod: cappedColl.getName()}, {cappedSize: 50 * 1024});
-checkCappedCollectionForDowngrade(resizeCommand);
-
-// We reset the size of the collection to be a non multiple of 256 bytes.
-const resetSizeCommand =
- Object.assign({}, {collMod: cappedColl.getName()}, {cappedSize: 50 * 1023});
-testDB.runCommand(resetSizeCommand);
-
-// We want to drop the collection in order to be able to downgrade.
-const dropCommand = Object.assign({}, {drop: cappedColl.getName()});
-checkCappedCollectionForDowngrade(dropCommand);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/catalog_shard.js b/jstests/noPassthrough/catalog_shard.js
index 6f63809c9d37e..846a7b7f604d6 100644
--- a/jstests/noPassthrough/catalog_shard.js
+++ b/jstests/noPassthrough/catalog_shard.js
@@ -1,21 +1,13 @@
/**
- * Tests catalog shard topology.
+ * Tests config shard topology.
*
* @tags: [
* requires_persistence,
* requires_fcv_70,
- * featureFlagCatalogShard,
* featureFlagTransitionToCatalogShard,
* ]
*/
-(function() {
-"use strict";
-
-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
-load("jstests/libs/catalog_shard_util.js");
+import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";
load("jstests/libs/fail_point_util.js");
load("jstests/libs/write_concern_util.js");
@@ -26,6 +18,13 @@ const unshardedDbName = "unsharded_db";
const unshardedNs = unshardedDbName + ".unsharded_coll";
const indexedNs = "db_with_index.coll";
+const timeseriesDbName = "timeseriesDB";
+const timeseriesUnshardedCollName = "unsharded_timeseries_coll";
+const timeseriesShardedCollName = "sharded_timeseries_coll";
+const timeseriesShardedNs = timeseriesDbName + "." + timeseriesShardedCollName;
+const timeseriesShardedBucketsNs =
+ `${timeseriesDbName}.system.buckets.${timeseriesShardedCollName}`;
+
function basicCRUD(conn) {
assert.commandWorked(st.s.getCollection(unshardedNs).insert([{x: 1}, {x: -1}]));
@@ -49,7 +48,7 @@ function getCatalogShardChunks(conn) {
const st = new ShardingTest({
shards: 1,
config: 3,
- catalogShard: true,
+ configShard: true,
});
const configShardName = st.shard0.shardName;
@@ -134,29 +133,6 @@ const newShardName =
assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 40}}));
}
-{
- //
- // ShardingStateRecovery doesn't block step up.
- //
-
- assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 0}, to: configShardName}));
-
- const hangMigrationFp = configureFailPoint(st.configRS.getPrimary(), "moveChunkHangAtStep5");
- const moveChunkThread = new Thread(function(mongosHost, ns, newShardName) {
- const mongos = new Mongo(mongosHost);
- assert.commandWorked(
- mongos.adminCommand({moveChunk: ns, find: {skey: 0}, to: newShardName}));
- }, st.s.host, ns, newShardName);
- moveChunkThread.start();
- hangMigrationFp.wait();
-
- // Stepping up shouldn't hang because of ShardingStateRecovery.
- st.configRS.stepUp(st.configRS.getSecondary());
-
- hangMigrationFp.off();
- moveChunkThread.join();
-}
-
{
//
// Collections on the config server support changeStreamPreAndPostImages when the config server
@@ -179,7 +155,7 @@ const newShardName =
{
//
- // Can't remove catalogShard using the removeShard command.
+ // Can't remove configShard using the removeShard command.
//
assert.commandFailedWithCode(st.s.adminCommand({removeShard: "config"}),
@@ -188,7 +164,7 @@ const newShardName =
{
//
- // Remove the catalog shard.
+ // Remove the config shard.
//
let configPrimary = st.configRS.getPrimary();
@@ -198,13 +174,31 @@ const newShardName =
st.s.adminCommand({moveChunk: indexedNs, find: {_id: 0}, to: configShardName}));
assert.commandWorked(st.s.getCollection(indexedNs).createIndex({oldKey: 1}));
+ // Create sharded and unsharded timeseries collections and verify that they and their buckets
+ // collections are correctly dropped. This provides coverage for views and sharded views.
+ const timeseriesDB = st.s.getDB(timeseriesDbName);
+ assert.commandWorked(timeseriesDB.createCollection(timeseriesUnshardedCollName,
+ {timeseries: {timeField: "time"}}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: timeseriesDbName, to: configShardName}));
+ assert.commandWorked(timeseriesDB.createCollection(timeseriesShardedCollName,
+ {timeseries: {timeField: "time"}}));
+ assert.commandWorked(st.s.adminCommand({shardCollection: timeseriesShardedNs, key: {time: 1}}));
+ assert.commandWorked(timeseriesDB[timeseriesShardedCollName].insert({time: ISODate()}));
+ st.printShardingStatus();
+ assert.commandWorked(st.s.adminCommand({
+ moveChunk: timeseriesShardedBucketsNs,
+ find: {"control.min.time": 0},
+ to: configShardName,
+ _waitForDelete: true
+ }));
+
// Use write concern to verify the commands support them. Any values weaker than the default
// sharding metadata write concerns will be upgraded.
let removeRes = assert.commandWorked(
st.s0.adminCommand({transitionToDedicatedConfigServer: 1, writeConcern: {wtimeout: 100}}));
assert.eq("started", removeRes.state);
- // The removal won't complete until all chunks and dbs are moved off the catalog shard.
+ // The removal won't complete until all chunks and dbs are moved off the config shard.
removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("ongoing", removeRes.state);
@@ -213,15 +207,22 @@ const newShardName =
{moveChunk: ns, find: {skey: -1}, to: newShardName, _waitForDelete: true}));
assert.commandWorked(st.s.adminCommand(
{moveChunk: indexedNs, find: {_id: 0}, to: newShardName, _waitForDelete: true}));
+ assert.commandWorked(st.s.adminCommand({
+ moveChunk: timeseriesShardedBucketsNs,
+ find: {"control.min.time": 0},
+ to: newShardName,
+ _waitForDelete: true
+ }));
// Blocked because of the sharded and unsharded databases and the remaining chunk.
removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("ongoing", removeRes.state);
assert.eq(1, removeRes.remaining.chunks);
- assert.eq(2, removeRes.remaining.dbs);
+ assert.eq(3, removeRes.remaining.dbs);
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: newShardName}));
assert.commandWorked(st.s.adminCommand({movePrimary: unshardedDbName, to: newShardName}));
+ assert.commandWorked(st.s.adminCommand({movePrimary: timeseriesDbName, to: newShardName}));
// The draining sharded collections should not have been locally dropped yet.
assert(configPrimary.getCollection(ns).exists());
@@ -246,13 +247,14 @@ const newShardName =
assert.eq(1, removeRes.pendingRangeDeletions);
suspendRangeDeletionFp.off();
- CatalogShardUtil.waitForRangeDeletions(st.s);
+ ConfigShardUtil.waitForRangeDeletions(st.s);
- // Start the final transition command. This will trigger locally dropping collections on the
- // config server. Hang after removing one collection and trigger a failover to verify the final
- // transition can be resumed on the new primary and the collection dropping is idempotent.
+ // Start the final transition command. This will trigger locally dropping all tracked user
+ // databases on the config server. Hang after removing one database and trigger a failover to
+ // verify the final transition can be resumed on the new primary and the database dropping is
+ // idempotent.
const hangRemoveFp = configureFailPoint(
- st.configRS.getPrimary(), "hangAfterDroppingCollectionInTransitionToDedicatedConfigServer");
+ st.configRS.getPrimary(), "hangAfterDroppingDatabaseInTransitionToDedicatedConfigServer");
const finishRemoveThread = new Thread(function(mongosHost) {
const mongos = new Mongo(mongosHost);
return mongos.adminCommand({transitionToDedicatedConfigServer: 1});
@@ -297,7 +299,7 @@ const newShardName =
{
//
- // Can't create catalogShard using the addShard command.
+ // Can't create configShard using the addShard command.
//
assert.commandFailed(st.s.adminCommand({addShard: st.configRS.getURL(), name: "config"}));
@@ -314,7 +316,7 @@ const newShardName =
{
//
- // Add back the catalog shard.
+ // Add back the config shard.
//
// Create an index while the collection is not on the config server to verify it clones the
@@ -324,7 +326,7 @@ const newShardName =
// Use write concern to verify the command support them. Any values weaker than the default
// sharding metadata write concerns will be upgraded.
assert.commandWorked(
- st.s.adminCommand({transitionToCatalogShard: 1, writeConcern: {wtimeout: 100}}));
+ st.s.adminCommand({transitionFromDedicatedConfigServer: 1, writeConcern: {wtimeout: 100}}));
// Basic CRUD and sharded DDL work.
basicCRUD(st.s);
@@ -340,38 +342,5 @@ const newShardName =
[{_id: 1}, {oldKey: 1}, {newKey: 1}]);
}
-{
- //
- // transitionToCatalogShard requires replication to all config server nodes.
- //
- // TODO SERVER-75391: Remove.
- //
-
- // Transition to dedicated mode so the config server can transition back to catalog shard mode.
- let removeRes = assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1}));
- assert.eq("started", removeRes.state);
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {skey: 0}, to: newShardName, _waitForDelete: true}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {skey: 5}, to: newShardName, _waitForDelete: true}));
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: indexedNs, find: {_id: 0}, to: newShardName, _waitForDelete: true}));
- assert.commandWorked(st.s.adminCommand({movePrimary: "directDB", to: newShardName}));
- assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1}));
-
- // transitionToCatalogShard times out with a lagged config secondary despite having a majority
- // of its set still replicating.
- const laggedSecondary = st.configRS.getSecondary();
- st.configRS.awaitReplication();
- stopServerReplication(laggedSecondary);
- assert.commandFailedWithCode(st.s.adminCommand({transitionToCatalogShard: 1, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
- restartServerReplication(laggedSecondary);
-
- // Now it succeeds.
- assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
-}
-
st.stop();
newShardRS.stopSet();
-}());
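A minimal sketch of the drain-and-poll flow this test exercises with transitionToDedicatedConfigServer, assuming `st` was started with `configShard: true` and that all user chunks and databases have already been moved to another shard (otherwise the reported state stays "ongoing" and the wait times out):

// The first call starts the transition; repeated calls report draining progress.
let res = assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("started", res.state);
assert.soon(() => {
    res = assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1}));
    jsTestLog("transition state: " + res.state + ", remaining: " + tojson(res.remaining));
    return res.state === "completed";
});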
diff --git a/jstests/noPassthrough/catalog_shard_resharding_fixture.js b/jstests/noPassthrough/catalog_shard_resharding_fixture.js
index b1f337f0fbc42..c6f8574769df4 100644
--- a/jstests/noPassthrough/catalog_shard_resharding_fixture.js
+++ b/jstests/noPassthrough/catalog_shard_resharding_fixture.js
@@ -1,24 +1,19 @@
/**
- * Test the ReshardingTest fixture can work with a catalogShard.
+ * Test the ReshardingTest fixture can work with a configShard.
*
* @tags: [
* requires_fcv_70,
- * featureFlagCatalogShard,
* featureFlagTransitionToCatalogShard,
* ]
*/
-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
(function() {
"use strict";
load("jstests/sharding/libs/resharding_test_fixture.js");
const reshardingTest =
- new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true, catalogShard: true});
+ new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true, configShard: true});
reshardingTest.setup();
const ns = "reshardingDb.coll";
diff --git a/jstests/noPassthrough/catalog_shard_secondary_reads.js b/jstests/noPassthrough/catalog_shard_secondary_reads.js
index 9ab579d282af3..914dec785d89b 100644
--- a/jstests/noPassthrough/catalog_shard_secondary_reads.js
+++ b/jstests/noPassthrough/catalog_shard_secondary_reads.js
@@ -1,18 +1,12 @@
/**
- * Tests catalog shard topology.
+ * Tests config shard topology.
*
* @tags: [
* requires_fcv_70,
- * featureFlagCatalogShard,
* ]
*/
-(function() {
-"use strict";
+import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";
-// TODO SERVER-74534: Enable metadata consistency check when it works with a catalog shard.
-TestData.skipCheckMetadataConsistency = true;
-
-load("jstests/libs/catalog_shard_util.js");
load("jstests/libs/fail_point_util.js");
load('jstests/libs/chunk_manipulation_util.js');
@@ -22,7 +16,7 @@ const st = new ShardingTest({
shards: {rs0: {nodes: 2}, rs1: {nodes: 2}},
config: 2,
mongos: 1,
- catalogShard: true,
+ configShard: true,
});
assert.commandWorked(st.s0.getDB('test').user.insert({_id: 1234}));
@@ -74,8 +68,8 @@ joinMoveChunk();
assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard1.shardName}));
assert.commandWorked(st.s0.adminCommand({movePrimary: 'sharded', to: st.shard1.shardName}));
-// A catalog shard can't be removed until all range deletions have finished.
-CatalogShardUtil.waitForRangeDeletions(st.s0);
+// A config shard can't be removed until all range deletions have finished.
+ConfigShardUtil.waitForRangeDeletions(st.s0);
removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.eq("completed", removeRes.state, tojson(removeRes));
@@ -122,7 +116,7 @@ assert.commandWorked(st.s0.adminCommand({setFeatureCompatibilityVersion: upgrade
// Need to drop the database before it can become a shard again.
assert.commandWorked(st.configRS.getPrimary().getDB('sharded').dropDatabase());
-assert.commandWorked(st.s0.adminCommand({transitionToCatalogShard: 1}));
+assert.commandWorked(st.s0.adminCommand({transitionFromDedicatedConfigServer: 1}));
assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard0.shardName}));
assert.commandWorked(
st.s0.adminCommand({moveChunk: 'sharded.user', find: {_id: 0}, to: st.shard0.shardName}));
@@ -135,5 +129,4 @@ assert.eq({_id: 5678}, doc);
st.stop();
-MongoRunner.stopMongod(staticMongod);
-})();
+MongoRunner.stopMongod(staticMongod);
\ No newline at end of file
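ConfigShardUtil.waitForRangeDeletions() is used above because the config server cannot stop being a shard while range-deletion tasks are still pending. A hypothetical sketch of what such a helper can boil down to; the real implementation lives in jstests/libs/config_shard_util.js and may differ, and waitForRangeDeletionsSketch with its argument list is an illustrative name only:

// Poll each shard primary's config.rangeDeletions collection until no tasks remain.
function waitForRangeDeletionsSketch(shardPrimaries) {
    assert.soon(
        () => shardPrimaries.every(
            (primary) => primary.getDB("config").rangeDeletions.countDocuments({}) === 0),
        "timed out waiting for range deletion tasks to drain");
}
// Example usage against the fixture above:
// waitForRangeDeletionsSketch([st.rs0.getPrimary(), st.rs1.getPrimary(), st.configRS.getPrimary()]);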
diff --git a/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js b/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js
deleted file mode 100644
index f23507031de3d..0000000000000
--- a/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Test that the $_generateV2ResumeTokens parameter cannot be used on mongoS when test commands are
- * disabled.
- * @tags: [
- * uses_change_streams,
- * requires_sharding,
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-// Signal to the ShardingTest that we want to disable test commands.
-TestData.enableTestCommands = false;
-
-// Create a sharding fixture with test commands disabled.
-const st = new ShardingTest({shards: 1, rs: {nodes: 1}});
-
-// Confirm that attempting to set any values for $_generateV2ResumeTokens field fails on mongos.
-assert.throwsWithCode(() => st.s.watch([], {$_generateV2ResumeTokens: true}).hasNext(), 6528201);
-assert.throwsWithCode(() => st.s.watch([], {$_generateV2ResumeTokens: false}).hasNext(), 6528201);
-
-// Confirm that attempting to run change streams with $_generateV2ResumeTokens:true fails on shards.
-assert.throwsWithCode(
- () => st.rs0.getPrimary().watch([], {$_generateV2ResumeTokens: true}).hasNext(), 6528200);
-
-// Explicity requesting v1 tokens is allowed on a shard. This is to allow a 6.0 mongoS to
-// communicate with a 7.0 shard.
-const stream = st.rs0.getPrimary().watch([], {$_generateV2ResumeTokens: false});
-assert.commandWorked(st.s.getDB("test")["coll"].insert({x: 1}));
-assert.soon(() => stream.hasNext());
-
-st.stop();
-})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js b/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js
deleted file mode 100644
index 2bdbb90739782..0000000000000
--- a/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Test that mongoS does not set the value of $_generateV2ResumeTokens on the commands it sends to
- * the shards, if no value was specified by the client. If a value was specified, mongoS forwards it
- * to the shards. On a replica set, no explicit value is set; the aggregation simply treats it as
- * default-true.
- * @tags: [
- * uses_change_streams,
- * requires_sharding,
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
-load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
-// Create a sharding fixture with a single one-node replset shard and a one-node replset config
-// server. The latter is to ensure that there is only one node that the internal new-shard monitor
-// $changeStream can be sent to, since it is dispatched with secondaryPreferred readPreference.
-const st = new ShardingTest({shards: 1, rs: {nodes: 1}, config: {nodes: 1}});
-
-const mongosDB = st.s.getDB("test");
-const shardDB = st.rs0.getPrimary().getDB(mongosDB.getName());
-const configDB = st.configRS.getPrimary().getDB("config");
-
-const mongosColl = assertDropAndRecreateCollection(mongosDB, jsTestName());
-const shardColl = shardDB[mongosColl.getName()];
-const configColl = configDB.shards;
-
-// Enable profiling on the shard and config server.
-assert.commandWorked(shardDB.setProfilingLevel(2));
-assert.commandWorked(configDB.setProfilingLevel(2));
-
-// Create one stream on mongoS that returns v2 tokens, the default.
-const v2MongosStream = mongosColl.watch([], {comment: "v2MongosStream"});
-
-// Create a second stream on mongoS that explicitly requests v1 tokens.
-const v1MongosStream =
- mongosColl.watch([], {comment: "v1MongosStream", $_generateV2ResumeTokens: false});
-
-// Create a stream directly on the shard which returns the default v2 tokens.
-const v2ShardStream = shardColl.watch([], {comment: "v2ShardStream"});
-
-// Insert a test document into the collection.
-assert.commandWorked(mongosColl.insert({_id: 1}));
-
-// Wait until all streams have encountered the insert operation.
-assert.soon(() => v1MongosStream.hasNext() && v2MongosStream.hasNext() && v2ShardStream.hasNext());
-
-// Confirm that in a sharded cluster, when v1 token is explicitly requested, mongoS fowards
-// $_generateV2ResumeTokens:false to the shard.
-profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- "originatingCommand.aggregate": mongosColl.getName(),
- "originatingCommand.comment": "v1MongosStream",
- "originatingCommand.$_generateV2ResumeTokens": false
- }
-});
-
-// Confirm that we also set $_generateV2ResumeTokens to false on the internal new-shard monitoring
-// $changeStream that we dispatch to the config servers.
-profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: configDB,
- filter: {
- "originatingCommand.aggregate": configColl.getName(),
- "originatingCommand.comment": "v1MongosStream",
- "originatingCommand.$_generateV2ResumeTokens": false
- }
-});
-
-// Confirm that mongoS never sets the $_generateV2ResumeTokens field when client didn't explicitly
-// specify.
-profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- "originatingCommand.aggregate": mongosColl.getName(),
- "originatingCommand.comment": "v2MongosStream",
- "originatingCommand.$_generateV2ResumeTokens": {$exists: false}
- }
-});
-
-// Confirm that we also do not set the $_generateV2ResumeTokens field on the request sent to the
-// config server.
-profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: configDB,
- filter: {
- "originatingCommand.aggregate": configColl.getName(),
- "originatingCommand.comment": "v2MongosStream",
- "originatingCommand.$_generateV2ResumeTokens": {$exists: false}
- }
-});
-
-// Confirm that on a replica set - in this case, a direct connection to the shard - no value is set
-// for $_generateV2ResumeTokens if the client did not specify one. The aggregation defaults to
-// treating the value as true.
-profilerHasAtLeastOneMatchingEntryOrThrow({
- profileDB: shardDB,
- filter: {
- "originatingCommand.aggregate": mongosColl.getName(),
- "originatingCommand.comment": "v2ShardStream",
- "originatingCommand.$_generateV2ResumeTokens": {$exists: false}
- }
-});
-
-st.stop();
-})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js
index d7af9b045cd18..d49a9e333c4c6 100644
--- a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js
+++ b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js
@@ -9,7 +9,13 @@ load("jstests/noPassthrough/libs/change_stream_pre_image_time_based_expiration_u
// Tests pre-image time based expiration on a replica-set.
(function testChangeStreamPreImagesforTimeBasedExpirationOnReplicaSet() {
- const replSetTest = new ReplSetTest({name: "replSet", nodes: 3});
+ const replSetTest = new ReplSetTest({
+ name: "replSet",
+ nodes: 3,
+ // The test expects an exact number of pre-images to be deleted. Thus, the pre-images truncate
+ // markers must contain at most one document each.
+ nodeOptions: {setParameter: {preImagesCollectionTruncateMarkersMinBytes: 1}}
+ });
replSetTest.startSet();
replSetTest.initiate();
diff --git a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js
index 551ae495b5874..803bfa45b2d2b 100644
--- a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js
+++ b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js
@@ -15,6 +15,9 @@ load("jstests/noPassthrough/libs/change_stream_pre_image_time_based_expiration_u
shards: 1,
rs: {
nodes: 3,
+ // The test expects an exact number of pre-images to be deleted. Thus, the pre-images
+ // truncate markers must contain at most one document each.
+ setParameter: {preImagesCollectionTruncateMarkersMinBytes: 1},
},
};
const st = new ShardingTest(options);
diff --git a/jstests/noPassthrough/change_stream_pre_images_server_stats.js b/jstests/noPassthrough/change_stream_pre_images_server_stats.js
new file mode 100644
index 0000000000000..06dcfc5889328
--- /dev/null
+++ b/jstests/noPassthrough/change_stream_pre_images_server_stats.js
@@ -0,0 +1,126 @@
+/**
+ * Tests that FTDC collects information about the pre-image collection, including its purging job.
+ * @tags: [ requires_replication ]
+ */
+(function() {
+'use strict';
+
+// For verifyGetDiagnosticData.
+load('jstests/libs/ftdc.js');
+
+const kExpiredPreImageRemovalJobSleepSeconds = 1;
+const kExpireAfterSeconds = 1;
+
+const replicaSet = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ setParameter:
+ {expiredChangeStreamPreImageRemovalJobSleepSecs: kExpiredPreImageRemovalJobSleepSeconds}
+ }
+});
+
+replicaSet.startSet();
+replicaSet.initiate();
+
+const primary = replicaSet.getPrimary();
+const adminDb = primary.getDB('admin');
+const testDb = primary.getDB(jsTestName());
+
+assert.soon(() => {
+ // Ensure that server status diagnostic data includes pre-image collection statistics.
+ const serverStatusDiagnostics = verifyGetDiagnosticData(adminDb).serverStatus;
+ return serverStatusDiagnostics.hasOwnProperty('changeStreamPreImages') &&
+ serverStatusDiagnostics.changeStreamPreImages.hasOwnProperty('purgingJob');
+});
+
+const diagnosticsBeforeTestCollModifications =
+ verifyGetDiagnosticData(adminDb).serverStatus.changeStreamPreImages.purgingJob;
+
+// Create collection and insert sample data.
+assert.commandWorked(
+ testDb.createCollection("testColl", {changeStreamPreAndPostImages: {enabled: true}}));
+const numberOfDocuments = 100;
+for (let i = 0; i < numberOfDocuments; i++) {
+ assert.commandWorked(testDb.testColl.insert({x: i}));
+}
+
+for (let i = 0; i < numberOfDocuments; i++) {
+ assert.commandWorked(testDb.testColl.updateOne({x: i}, {$inc: {y: 1}}));
+}
+
+const preImageCollection = primary.getDB('config')['system.preimages'];
+
+const estimatedToBeRemovedDocsSize = preImageCollection.find()
+ .toArray()
+ .map(doc => Object.bsonsize(doc))
+ .reduce((acc, size) => acc + size, 0);
+assert.gt(estimatedToBeRemovedDocsSize, 0);
+
+// Set the 'expireAfterSeconds' to 'kExpireAfterSeconds'.
+assert.commandWorked(adminDb.runCommand({
+ setClusterParameter:
+ {changeStreamOptions: {preAndPostImages: {expireAfterSeconds: kExpireAfterSeconds}}}
+}));
+
+// Ensure purging job deletes the expired pre-image entries of the test collection.
+assert.soon(() => {
+ // All entries are removed.
+ return preImageCollection.count() === 0;
+});
+
+// Ensure that FTDC collected the purging job information of the pre-image collection.
+assert.soon(() => {
+ const diagnosticsAfterTestCollModifications =
+ verifyGetDiagnosticData(adminDb).serverStatus.changeStreamPreImages.purgingJob;
+
+ const totalPassBigger = diagnosticsAfterTestCollModifications.totalPass >
+ diagnosticsBeforeTestCollModifications.totalPass;
+ const scannedBigger = diagnosticsAfterTestCollModifications.scannedCollections >
+ diagnosticsBeforeTestCollModifications.scannedCollections;
+ const scannedInternalBigger = diagnosticsAfterTestCollModifications.scannedInternalCollections >
+ diagnosticsBeforeTestCollModifications.scannedInternalCollections;
+ const bytesEqual = diagnosticsAfterTestCollModifications.bytesDeleted >=
+ diagnosticsBeforeTestCollModifications.bytesDeleted + estimatedToBeRemovedDocsSize;
+ const docsDeletedEqual = diagnosticsAfterTestCollModifications.docsDeleted >=
+ diagnosticsBeforeTestCollModifications.docsDeleted + numberOfDocuments;
+ const wallTimeGTE = diagnosticsAfterTestCollModifications.maxStartWallTimeMillis.tojson() >=
+ ISODate("1970-01-01T00:00:00.000Z").tojson();
+ const timeElapsedGTE = diagnosticsAfterTestCollModifications.timeElapsedMillis >=
+ diagnosticsBeforeTestCollModifications.timeElapsedMillis;
+
+ // For debugging purposes, log which condition failed.
+ if (!totalPassBigger) {
+ jsTestLog("totalPassBigger failed, retrying");
+ return false;
+ }
+ if (!scannedBigger) {
+ jsTestLog("scannedBigger failed, retrying");
+ return false;
+ }
+ if (!scannedInternalBigger) {
+ jsTestLog("scannedInternalBigger failed, retrying");
+ return false;
+ }
+ if (!bytesEqual) {
+ jsTestLog("bytesEqual) failed, retrying");
+ return false;
+ }
+ if (!docsDeletedEqual) {
+ jsTestLog("docsDeletedEqual failed, retrying");
+ return false;
+ }
+ if (!wallTimeGTE) {
+ jsTestLog("wallTimeGTE failed, retrying");
+ return false;
+ }
+ if (!timeElapsedGTE) {
+ jsTestLog("timeElapsedGTE failed, retrying");
+ return false;
+ }
+
+ return totalPassBigger && scannedBigger && scannedInternalBigger && bytesEqual &&
+ docsDeletedEqual && wallTimeGTE && timeElapsedGTE;
+});
+
+replicaSet.stopSet();
+}());
diff --git a/jstests/noPassthrough/change_streams_per_shard_cursor.js b/jstests/noPassthrough/change_streams_per_shard_cursor.js
index d4fcefab48da2..d2c752a5fa47a 100644
--- a/jstests/noPassthrough/change_streams_per_shard_cursor.js
+++ b/jstests/noPassthrough/change_streams_per_shard_cursor.js
@@ -4,10 +4,7 @@
* uses_change_streams,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/catalog_shard_util.js");
+import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";
const dbName = jsTestName();
const setupShardedCluster = (shards = 1) => {
@@ -69,9 +66,11 @@ assert.commandFailedWithCode(sdb.runCommand({
}),
6273801);
-// $out can't passthrough so it's not allowed.
+// $out can't passthrough so it's not allowed. This may be caught in parsing, or when preparing
+// the aggregation.
assert.commandFailedWithCode(
- assert.throws(() => pscWatch(sdb, "coll", shardId, {pipeline: [{$out: "h"}]})), 6273802);
+ assert.throws(() => pscWatch(sdb, "coll", shardId, {pipeline: [{$out: "h"}]})),
+ [6273802, ErrorCodes.IllegalOperation]);
// Shard option should be specified.
assert.commandFailedWithCode(
@@ -83,12 +82,6 @@ assert.commandFailedWithCode(
assert.commandFailedWithCode(assert.throws(() => pscWatch(sdb, "coll", 42)),
ErrorCodes.TypeMismatch);
-const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st);
-if (!isCatalogShardEnabled) {
- // Can't open a per shard cursor on the config RS.
- assert.commandFailedWithCode(assert.throws(() => pscWatch(sdb, "coll", "config")), 6273803);
-}
-
// The shardId should be a valid shard.
assert.commandFailedWithCode(
assert.throws(() => pscWatch(sdb, "coll", "Dwane 'the Shard' Johnson")),
@@ -109,18 +102,16 @@ for (let i = 1; i <= 4; i++) {
}
assert(!c.hasNext());
-if (isCatalogShardEnabled) {
- // Can open a per shard cursor on the config server.
- const configDB = st.s0.getDB("config");
- c = pscWatch(configDB, "coll", "config", undefined /* options */, {allowToRunOnConfigDB: true});
- for (let i = 1; i <= 4; i++) {
- configDB.coll.insertOne({location: 2, i});
- assert(!c.isExhausted());
- assert.soon(() => c.hasNext());
- c.next();
- }
- assert(!c.hasNext());
+// Can open a per shard cursor on the config server.
+const configDB = st.s0.getDB("config");
+c = pscWatch(configDB, "coll", "config", undefined /* options */, {allowToRunOnConfigDB: true});
+for (let i = 1; i <= 4; i++) {
+ configDB.coll.insertOne({location: 2, i});
+ assert(!c.isExhausted());
+ assert.soon(() => c.hasNext());
+ c.next();
}
+assert(!c.hasNext());
// Simple database level watch
c = pscWatch(sdb, 1, shardId);
@@ -211,5 +202,4 @@ sdb.coll2.insertOne({location: 10, _id: 4});
assert(!c.isExhausted());
assert(!c.hasNext());
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/change_streams_pre_image_removal_job.js b/jstests/noPassthrough/change_streams_pre_image_removal_job.js
index 274153370b3e8..e7293e4501320 100644
--- a/jstests/noPassthrough/change_streams_pre_image_removal_job.js
+++ b/jstests/noPassthrough/change_streams_pre_image_removal_job.js
@@ -8,11 +8,9 @@
// requires_replication,
// requires_majority_read_concern,
// ]
-(function() {
-"use strict";
-
load('jstests/replsets/rslib.js'); // For getLatestOp, getFirstOplogEntry.
load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const docA = {
_id: 12345,
@@ -33,7 +31,12 @@ const oplogSizeMB = 1;
// Set up the replica set with two nodes and two collections with 'changeStreamPreAndPostImages'
// enabled and run expired pre-image removal job every second.
const rst = new ReplSetTest({nodes: 2, oplogSize: oplogSizeMB});
-rst.startSet({setParameter: {expiredChangeStreamPreImageRemovalJobSleepSecs: 1}});
+rst.startSet({
+ setParameter: {
+ expiredChangeStreamPreImageRemovalJobSleepSecs: 1,
+ preImagesCollectionTruncateMarkersMinBytes: 1
+ }
+});
rst.initiate();
const largeStr = 'abcdefghi'.repeat(4 * 1024);
const primaryNode = rst.getPrimary();
@@ -135,36 +138,43 @@ function retryOnCappedPositionLostError(func, message) {
return onlyTwoPreImagesLeft && allPreImagesHaveBiggerTimestamp;
});
- // Because the pre-images collection is implicitly replicated, validate that writes do not
- // generate oplog entries, with the exception of deletions.
- const preimagesNs = 'config.system.preimages';
- // Multi-deletes are batched base on time before performing the deletion, therefore the
- // deleted pre-images can span through multiple applyOps oplog entries.
- //
- // As pre-images span two collections, the minimum number of batches is 2, as we perform
- // the range-deletion per collection. The maximum number of batches is 4 (one per single
- // pre-image removed).
- const expectedNumberOfBatchesRange = [2, 3, 4];
- const serverStatusBatches = testDB.serverStatus()['batchedDeletes']['batches'];
- const serverStatusDocs = testDB.serverStatus()['batchedDeletes']['docs'];
- assert.contains(serverStatusBatches, expectedNumberOfBatchesRange);
- assert.eq(serverStatusDocs, preImagesToExpire);
- assert.contains(
- retryOnCappedPositionLostError(
- () => localDB.oplog.rs
- .find({ns: 'admin.$cmd', 'o.applyOps.op': 'd', 'o.applyOps.ns': preimagesNs})
- .itcount(),
- "Failed to fetch oplog entries for pre-image deletes"),
- expectedNumberOfBatchesRange);
- assert.eq(0,
- retryOnCappedPositionLostError(
- () => localDB.oplog.rs.find({op: {'$ne': 'd'}, ns: preimagesNs}).itcount(),
- "Failed to fetch all oplog entries except pre-image deletes"));
-
- // Verify that pre-images collection content on the primary node is the same as on the
- // secondary.
- rst.awaitReplication();
- assert(bsonWoCompare(getPreImages(primaryNode), getPreImages(rst.getSecondary())) === 0);
+ // If the feature flag is on, then batched deletes will not be used for deletion. Additionally,
+ // since truncates are not replicated, the number of pre-images on the primary may differ from
+ // that of the secondary.
+ if (!FeatureFlagUtil.isPresentAndEnabled(testDB, "UseUnreplicatedTruncatesForDeletions")) {
+ // Because the pre-images collection is implicitly replicated, validate that writes do not
+ // generate oplog entries, with the exception of deletions.
+ const preimagesNs = 'config.system.preimages';
+ // Multi-deletes are batched based on time before performing the deletion, therefore the
+ // deleted pre-images can span multiple applyOps oplog entries.
+ //
+ // As pre-images span two collections, the minimum number of batches is 2, as we perform
+ // the range-deletion per collection. The maximum number of batches is 4 (one per single
+ // pre-image removed).
+ const expectedNumberOfBatchesRange = [2, 3, 4];
+ const serverStatusBatches = testDB.serverStatus()['batchedDeletes']['batches'];
+ const serverStatusDocs = testDB.serverStatus()['batchedDeletes']['docs'];
+ assert.contains(serverStatusBatches, expectedNumberOfBatchesRange);
+ assert.eq(serverStatusDocs, preImagesToExpire);
+ assert.contains(
+ retryOnCappedPositionLostError(
+ () =>
+ localDB.oplog.rs
+ .find(
+ {ns: 'admin.$cmd', 'o.applyOps.op': 'd', 'o.applyOps.ns': preimagesNs})
+ .itcount(),
+ "Failed to fetch oplog entries for pre-image deletes"),
+ expectedNumberOfBatchesRange);
+ assert.eq(0,
+ retryOnCappedPositionLostError(
+ () => localDB.oplog.rs.find({op: {'$ne': 'd'}, ns: preimagesNs}).itcount(),
+ "Failed to fetch all oplog entries except pre-image deletes"));
+
+ // Verify that pre-images collection content on the primary node is the same as on the
+ // secondary.
+ rst.awaitReplication();
+ assert(bsonWoCompare(getPreImages(primaryNode), getPreImages(rst.getSecondary())) === 0);
+ }
}
// Increase oplog size on each node to prevent oplog entries from being deleted which removes a
@@ -174,5 +184,4 @@ rst.nodes.forEach((node) => {
assert.commandWorked(node.adminCommand({replSetResizeOplog: 1, size: largeOplogSizeMB}));
});
-rst.stopSet();
-}());
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/classic_cqf_simple_comparisons.js b/jstests/noPassthrough/classic_cqf_simple_comparisons.js
new file mode 100644
index 0000000000000..9928e0c3cf076
--- /dev/null
+++ b/jstests/noPassthrough/classic_cqf_simple_comparisons.js
@@ -0,0 +1,50 @@
+/**
+ * Tests that comparisons against a variety of BSON types and shapes are the same in CQF and
+ * classic.
+ */
+import {leafs, smallDocs} from "jstests/query_golden/libs/example_data.js";
+
+const cqfConn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}});
+assert.neq(null, cqfConn, "mongod was unable to start up");
+const cqfDb = cqfConn.getDB(jsTestName());
+
+assert.commandWorked(
+ cqfDb.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"}));
+const cqfColl = cqfDb.cqf_compare;
+cqfColl.drop();
+
+// Disable via TestData so there's no conflict in case a variant has this enabled.
+TestData.setParameters.featureFlagCommonQueryFramework = false;
+TestData.setParameters.internalQueryFrameworkControl = 'trySbeEngine';
+const classicConn = MongoRunner.runMongod();
+assert.neq(null, classicConn, "mongod was unable to start up");
+
+const classicColl = classicConn.getDB(jsTestName()).classic_compare;
+classicColl.drop();
+
+// TODO SERVER-67818 Bonsai NaN $eq NaN should be true.
+// The above ticket also fixes inequality comparisons to NaN.
+const docs = smallDocs().filter(doc => !tojson(doc).match(/NaN/));
+cqfColl.insert(docs);
+classicColl.insert(docs);
+
+for (const op of ['$eq', '$lt', '$lte', '$gt', '$gte']) {
+ for (const leaf of leafs()) {
+ // TODO SERVER-67550 Equality to null does not match undefined, in Bonsai.
+ if (tojson(leaf).match(/null|undefined/))
+ continue;
+ // TODO SERVER-67818 Bonsai NaN $eq NaN should be true.
+ if (tojson(leaf).match(/NaN/))
+ continue;
+ // Regex with non-equality predicate is not allowed.
+ if (leaf instanceof RegExp && op !== '$eq')
+ continue;
+
+ const cqfResult = cqfColl.find({a: {[op]: leaf}}, {_id: 0}).toArray();
+ const classicResult = classicColl.find({a: {[op]: leaf}}, {_id: 0}).toArray();
+ assert.eq(cqfResult, classicResult);
+ }
+}
+
+MongoRunner.stopMongod(cqfConn);
+MongoRunner.stopMongod(classicConn);
diff --git a/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js b/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js
index f205f24b5c8fe..d596517c7ada4 100644
--- a/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js
+++ b/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js
@@ -18,4 +18,4 @@ assert.commandFailedWithCode(st.s.adminCommand({
ErrorCodes.ClientDisconnect);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js
index 71fc22a890442..d8c43151c361f 100644
--- a/jstests/noPassthrough/client_metadata_log.js
+++ b/jstests/noPassthrough/client_metadata_log.js
@@ -8,7 +8,7 @@
(function() {
'use strict';
-let checkLog = function(conn) {
+let checkLogForMetadata = function(conn) {
let coll = conn.getCollection("test.foo");
assert.commandWorked(coll.insert({_id: 1}));
@@ -30,7 +30,7 @@ let testMongoD = function() {
let conn = MongoRunner.runMongod({useLogFiles: true});
assert.neq(null, conn, 'mongod was unable to start up');
- checkLog(conn);
+ checkLogForMetadata(conn);
MongoRunner.stopMongod(conn);
};
@@ -43,7 +43,7 @@ let testMongoS = function() {
let st = new ShardingTest({shards: 1, mongos: 1, other: options});
- checkLog(st.s0);
+ checkLogForMetadata(st.s0);
// Validate db.currentOp() contains mongos information
let curOp = st.s0.adminCommand({currentOp: 1});
diff --git a/jstests/noPassthrough/cluster-server-parameter-op-observer.js b/jstests/noPassthrough/cluster-server-parameter-op-observer.js
index ce8f7dbcb115a..ebcb40a58ba1c 100644
--- a/jstests/noPassthrough/cluster-server-parameter-op-observer.js
+++ b/jstests/noPassthrough/cluster-server-parameter-op-observer.js
@@ -1,22 +1,15 @@
// Test that ClusterServerParameterOpObserver fires appropriately.
-// @tags: [requires_replication]
+// @tags: [requires_replication, requires_fcv_71]
(function() {
'use strict';
-const kUnknownCSPLogId = 6226300;
-const kUnknownCSPLogComponent = 'control';
-const kUnknownCSPLogLevel = 3;
-
function runTest(conn) {
const config = conn.getDB('config');
- const originalLogLevel =
- assert.commandWorked(config.setLogLevel(kUnknownCSPLogLevel, kUnknownCSPLogComponent))
- .was.verbosity;
- assert.writeOK(
- config.clusterParameters.insert({_id: 'foo', clusterParameterTime: Date(), value: 123}));
- assert.commandWorked(config.setLogLevel(originalLogLevel, kUnknownCSPLogComponent));
- assert(checkLog.checkContainsOnceJson(conn, kUnknownCSPLogId, {name: 'foo'}));
+ const res =
+ config.clusterParameters.insert({_id: 'foo', clusterParameterTime: Date(), value: 123});
+ assert(res.hasWriteError());
+ assert.neq(res.getWriteError().length, 0);
}
const rst = new ReplSetTest({nodes: 2});
diff --git a/jstests/noPassthrough/cluster_analyze_command.js b/jstests/noPassthrough/cluster_analyze_command.js
index 8dea667053bf6..a193971549ac7 100644
--- a/jstests/noPassthrough/cluster_analyze_command.js
+++ b/jstests/noPassthrough/cluster_analyze_command.js
@@ -1,7 +1,4 @@
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const st = new ShardingTest({
shards: 2,
@@ -17,7 +14,7 @@ const db = st.getDB("test");
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
st.stop();
- return;
+ quit();
}
const coll = db.analyze_coll;
@@ -40,4 +37,3 @@ res = db.runCommand({analyze: coll.getName(), writeConcern: {w: 1}});
assert.commandWorked(res);
st.stop();
-})();
diff --git a/jstests/noPassthrough/cluster_commands_require_cluster_node.js b/jstests/noPassthrough/cluster_commands_require_cluster_node.js
index 6366db4538eaf..c7787ac31cb0f 100644
--- a/jstests/noPassthrough/cluster_commands_require_cluster_node.js
+++ b/jstests/noPassthrough/cluster_commands_require_cluster_node.js
@@ -6,10 +6,7 @@
* requires_sharding,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/catalog_shard_util.js");
+import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";
const kDBName = "foo";
const kCollName = "bar";
@@ -29,6 +26,7 @@ const clusterCommandsCases = [
},
{cmd: {clusterInsert: kCollName, documents: [{x: 1}]}},
{cmd: {clusterUpdate: kCollName, updates: [{q: {doesNotExist: 1}, u: {x: 1}}]}},
+ // TODO SERVER-52419 add test for bulkWrite.
];
function runTestCaseExpectFail(conn, testCase, code) {
@@ -95,20 +93,11 @@ function runTestCaseExpectSuccess(conn, testCase) {
runTestCaseExpectFail(st.s, testCase, ErrorCodes.CommandNotFound);
}
- //
- // Cluster commands are allowed on a catalog shard enabled config server.
- //
-
- const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st);
for (let testCase of clusterCommandsCases) {
- if (isCatalogShardEnabled) {
- if (testCase.expectedErr) {
- runTestCaseExpectFail(st.rs0.getPrimary(), testCase, testCase.expectedErr);
- } else {
- runTestCaseExpectSuccess(st.rs0.getPrimary(), testCase);
- }
+ if (testCase.expectedErr) {
+ runTestCaseExpectFail(st.rs0.getPrimary(), testCase, testCase.expectedErr);
} else {
- runTestCaseExpectFail(st.configRS.getPrimary(), testCase, ErrorCodes.NoShardingEnabled);
+ runTestCaseExpectSuccess(st.rs0.getPrimary(), testCase);
}
}
@@ -125,5 +114,4 @@ function runTestCaseExpectSuccess(conn, testCase) {
}
st.stop();
-}
-}());
+}
\ No newline at end of file
diff --git a/jstests/noPassthrough/cluster_server_parameter_refresher.js b/jstests/noPassthrough/cluster_server_parameter_refresher.js
index a0be0a5dac95f..4a5e2f7b48fc2 100644
--- a/jstests/noPassthrough/cluster_server_parameter_refresher.js
+++ b/jstests/noPassthrough/cluster_server_parameter_refresher.js
@@ -9,10 +9,10 @@
* requires_sharding
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/cluster_server_parameter_utils.js');
+import {
+ kAllClusterParameterDefaults,
+ runGetClusterParameterSharded
+} from "jstests/libs/cluster_server_parameter_utils.js";
function runTest(st, startupRefreshIntervalMS) {
// This assert is necessary because we subtract 8000 MS from this value later on, and we don't
@@ -153,5 +153,4 @@ let options = {
};
let st = new ShardingTest(options);
runTest(st, 10000);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/clustered_capped_collection.js b/jstests/noPassthrough/clustered_capped_collection.js
index d30a445a925e8..2beac6ff802c1 100644
--- a/jstests/noPassthrough/clustered_capped_collection.js
+++ b/jstests/noPassthrough/clustered_capped_collection.js
@@ -5,13 +5,12 @@
* requires_fcv_53,
* requires_replication,
* does_not_support_stepdowns,
+ * # Tests running with experimental CQF behavior require test commands to be enabled.
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_capped_utils.js");
+import {ClusteredCappedUtils} from "jstests/libs/clustered_collections/clustered_capped_utils.js";
{
const replSet = new ReplSetTest({name: "clustered_capped_collections", nodes: 1});
@@ -58,4 +57,3 @@ load("jstests/libs/clustered_collections/clustered_capped_utils.js");
replSetNoTestCommands.stopSet();
}
-})();
diff --git a/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js b/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js
index 1b369ba4d0447..7c5aa457febf3 100644
--- a/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js
+++ b/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js
@@ -7,11 +7,8 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_capped_utils.js");
+import {ClusteredCappedUtils} from "jstests/libs/clustered_collections/clustered_capped_utils.js";
const replSet = new ReplSetTest({name: "clustered_capped_collections", nodes: 1});
replSet.startSet({setParameter: {ttlMonitorSleepSecs: 1, supportArbitraryClusterKeyIndex: true}});
@@ -39,4 +36,3 @@ for (let awaitData of [false, true]) {
}
replSet.stopSet();
-})();
diff --git a/jstests/noPassthrough/clustered_coll_mod.js b/jstests/noPassthrough/clustered_coll_mod.js
index 997b452436dab..ba19c97198666 100644
--- a/jstests/noPassthrough/clustered_coll_mod.js
+++ b/jstests/noPassthrough/clustered_coll_mod.js
@@ -6,11 +6,8 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Run TTL monitor constantly to speed up this test.
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
@@ -76,5 +73,4 @@ function testCollMod(coll, clusterKey, clusterKeyName) {
testCollMod(conn.getDB(jsTestName())["coll"], {_id: 1}, "_id_");
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js b/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js
index 35ef0ee48d3fd..2291735e6f6ec 100644
--- a/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js
+++ b/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js
@@ -6,11 +6,8 @@
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Run TTL monitor constantly to speed up this test.
const conn = MongoRunner.runMongod(
@@ -76,5 +73,4 @@ function testCollMod(coll, clusterKey, clusterKeyName) {
testCollMod(conn.getDB("local")["coll"], {ts: 1}, "ts_1");
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js b/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js
index 1e17e794d86e2..a4c606ffe8b12 100644
--- a/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js
+++ b/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js
@@ -8,11 +8,10 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js");
+import {
+ testClusteredCollectionBoundedScan
+} from "jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js";
const conn = MongoRunner.runMongod({setParameter: {supportArbitraryClusterKeyIndex: true}});
@@ -23,4 +22,3 @@ const nonReplicatedColl = nonReplicatedDB[collName];
testClusteredCollectionBoundedScan(nonReplicatedColl, {ts: 1});
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js b/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js
index 42ab4ddeab25f..c5fffbb132b0e 100644
--- a/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js
+++ b/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js
@@ -7,10 +7,10 @@
* assumes_unsharded_collection,
* ]
*/
-(function() {
-"use strict";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/clustered_collections/clustered_collection_hint_common.js");
+import {
+ testClusteredCollectionHint
+} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js";
const conn = MongoRunner.runMongod({setParameter: {supportArbitraryClusterKeyIndex: true}});
@@ -21,4 +21,3 @@ const nonReplicatedColl = nonReplicatedDB[collName];
testClusteredCollectionHint(nonReplicatedColl, {ts: 1}, "ts_1");
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/clustered_collection_sorted_scan.js b/jstests/noPassthrough/clustered_collection_sorted_scan.js
index 21900ec8773f9..8b1d7021883b5 100644
--- a/jstests/noPassthrough/clustered_collection_sorted_scan.js
+++ b/jstests/noPassthrough/clustered_collection_sorted_scan.js
@@ -2,10 +2,8 @@
* Tests that clustered collections can be used for sorted scanning without inserting
* a blocking scan operator.
*/
-(function() {
-"use strict";
+import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js");
load("jstests/libs/clustered_collections/clustered_collection_util.js");
Random.setRandomSeed();
@@ -15,7 +13,7 @@ const testConnection =
const testDb = testConnection.getDB('local');
const collectionSize = 10;
const clusteredCollName = "clustered_index_sorted_scan_coll";
-const clusterField = "clusterKey";
+const clusterField = "_id";
let nonClusteredCollName = clusteredCollName + "_nc";
@@ -26,7 +24,6 @@ let clusteredColl = testDb[clusteredCollName];
// Generate a non-clustered collection for comparison
assert.commandWorked(testDb.createCollection(nonClusteredCollName));
-assert.commandWorked(testDb[nonClusteredCollName].createIndex({[clusterField]: 1}, {unique: true}));
let nonClusteredColl = testDb[nonClusteredCollName];
// Put something in the collections so the planner has something to chew on.
@@ -72,12 +69,16 @@ function runTest(isClustered, hasFilter, hasHint, direction) {
assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + formatParamsAndPlan(plan));
}
-function testCollations(direction) {
+function testCollations(collectionCollation, queryCollation, direction) {
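+    // Reference equality is sufficient here: callers pass the same collation object whenever the
+    // collection and query collations are meant to match.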
+ const collationsMatch = collectionCollation == queryCollation;
+
let strCollName = clusteredCollName + "_str";
// Generate a clustered collection for the remainder of the testing
- assert.commandWorked(testDb.createCollection(
- strCollName, {clusteredIndex: {key: {[clusterField]: 1}, unique: true}}));
+ assert.commandWorked(testDb.createCollection(strCollName, {
+ clusteredIndex: {key: {[clusterField]: 1}, unique: true},
+ collation: collectionCollation
+ }));
let tsColl = testDb[strCollName];
@@ -86,22 +87,120 @@ function testCollations(direction) {
assert.commandWorked(tsColl.insert({[clusterField]: i.toString(), a: Math.random()}));
}
- // Run query with Faroese collation, just to choose something unlikely.
- // Because the collations don't match, we can't use the clustered index
- // to provide a sort
- let plan = tsColl.find()
- .sort({[clusterField]: direction})
- .collation({locale: "fo", caseLevel: true})
- .explain();
- assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan));
-
- // However, if we can exclude strings, we don't need an explicit sort even
- // if the collations don't match
- plan = tsColl.find({[clusterField]: {$gt: -1}})
- .sort({[clusterField]: direction})
- .collation({locale: "fo", caseLevel: true})
- .explain();
+ function runExplain(filter) {
+ return tsColl.find(filter)
+ .sort({[clusterField]: direction})
+ .collation(queryCollation)
+ .explain();
+ }
+
+ //
+ // Some queries need an explicit sort only when the query/collection collations do not match.
+ //
+ function assertPlanOnlyHasSortIfCollationsDontMatch(plan) {
+ if (collationsMatch) {
+            assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+ } else {
+ assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan));
+ }
+ }
+
+ // Empty match.
+ let plan = runExplain({});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Comparison against a field other than the cluster field.
+ plan = runExplain({a: {$lt: 2}});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Query which contains an unsupported match expression.
+ plan = runExplain({$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Conjunction with one child which is an unsupported match expression and another which is a
+ // comparison against a field other than the cluster field.
+ plan = runExplain(
+ {$and: [{$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]}, {a: {$gt: -1}}]});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Match which compares the cluster field to a string.
+ plan = runExplain({[clusterField]: {$gt: "1"}});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Match which compares the cluster field to an object containing a string.
+ plan = runExplain({[clusterField]: {$eq: {a: "str"}}});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Match which compares the cluster field to an array containing a string.
+ plan = runExplain({[clusterField]: {$eq: [1, 2, "str"]}});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // $in query where one of the elements is a string.
+ plan = runExplain({[clusterField]: {$in: [1, "2", 3]}});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Conjunction with one child which compares the cluster field to a string and another which
+ // is a comparison against a field other than the cluster field.
+ plan = runExplain({$and: [{[clusterField]: "str"}, {a: 5}]});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ // Conjunction with one $in child which compares the cluster field to a string and another
+ // which is a comparison against a field other than the cluster field.
+ plan = runExplain({$and: [{[clusterField]: {$in: [1, "2", 3]}}, {a: 5}]});
+ assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+ //
+    // Some queries can omit the explicit sort regardless of collations. This is the case when
+    // we can exclude string values of the cluster key from the output.
+ //
+
+ // Simple comparison on cluster key which omits strings.
+ plan = runExplain({[clusterField]: {$gt: -1}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+    plan = runExplain({[clusterField]: {$eq: {a: 5}}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+    plan = runExplain({[clusterField]: {$eq: [1, 2, 3]}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction with multiple comparisons on cluster key which omits strings.
+ plan = runExplain({$and: [{[clusterField]: {$gt: -1}}, {[clusterField]: {$lt: 10}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // $in query against cluster key which omits strings.
+ plan = runExplain({[clusterField]: {$in: [1, 2, 3]}});
     assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction of $in query against cluster key and another comparison on a field other than
+ // the cluster key. The first conjunct omits strings.
+ plan = runExplain({$and: [{[clusterField]: {$in: [1, 2, 3]}}, {a: 5}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction with one comparison against the cluster key and one against another field. The
+ // second conjunct omits strings.
+ plan = runExplain({$and: [{a: {$lt: 2}}, {[clusterField]: {$gt: -1}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction with one child which is an unsupported match expression and another which is
+ // a comparison against the cluster field. The second conjunct omits strings.
+ plan = runExplain({
+ $and: [
+ {$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]},
+ {[clusterField]: {$gt: -1}}
+ ]
+ });
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction which contains a comparison of the cluster field to a string and a comparison
+ // of the cluster field to a number. The second conjunct omits strings.
+ plan = runExplain({$and: [{[clusterField]: {$lt: "1"}}, {[clusterField]: {$gt: 2}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+ // Conjunction which contains a $in comparison of the cluster field to a string and a $in
+ // comparison of the cluster field to a number. The second conjunct omits strings.
+ plan = runExplain(
+ {$and: [{[clusterField]: {$in: [1, "2", 3]}}, {[clusterField]: {$in: [1, 3, 4]}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
tsColl.drop();
}
@@ -146,7 +245,7 @@ function testPlanCache(direction) {
assert.commandWorked(clusteredColl.createIndex({a: 1}, {name: indexName}));
const filter = {a: {$gt: -1}};
- const projection = {_id: 0, [clusterField]: 1};
+ const projection = {[clusterField]: 1};
const sort = {[clusterField]: direction};
     // Because of the _a index above, we should have two alternatives -- filter via the
@@ -190,8 +289,31 @@ for (let isClustered = 0; isClustered <= 1; isClustered++) {
}
}
-testCollations(/* direction = */ 1);
-testCollations(/* direction = */ -1);
+//
+// Show that the direction of the sort does not affect the plans we are able to provide. Also show
+// the collation conditions under which we can avoid explicit sorts in the final plan.
+//
+
+const defaultCollation = {
+ locale: "simple",
+};
+const faroeseCollation = {
+ locale: "fo",
+ caseLevel: true
+};
+
+testCollations(
+ defaultCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ 1);
+testCollations(
+ defaultCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ -1);
+testCollations(
+ faroeseCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ 1);
+testCollations(
+ faroeseCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ -1);
+testCollations(
+ defaultCollation /* for collection */, defaultCollation /* for query */, /* direction = */ 1);
+testCollations(
+ defaultCollation /* for collection */, defaultCollation /* for query */, /* direction = */ -1);
testMinMax();
@@ -205,5 +327,4 @@ assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan));
clusteredColl.drop();
nonClusteredColl.drop();
-MongoRunner.stopMongod(testConnection);
-})();
+MongoRunner.stopMongod(testConnection);
\ No newline at end of file
diff --git a/jstests/noPassthrough/clustered_collection_ttl.js b/jstests/noPassthrough/clustered_collection_ttl.js
index de21cccb219bd..da1b0db8bb6fe 100644
--- a/jstests/noPassthrough/clustered_collection_ttl.js
+++ b/jstests/noPassthrough/clustered_collection_ttl.js
@@ -6,11 +6,9 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
load('jstests/libs/dateutil.js');
-load('jstests/libs/ttl_util.js');
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Run TTL monitor constantly to speed up this test.
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
@@ -102,5 +100,4 @@ assert.commandWorked(replicatedColl.createIndex({ttlField: 1}, {expireAfterSecon
insertAndValidateTTL(replicatedColl, "ttlField");
replicatedColl.drop();
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/collection_catalog_two_phase_drops.js b/jstests/noPassthrough/collection_catalog_two_phase_drops.js
index e53da1c1dc72c..a8868eb7f84df 100644
--- a/jstests/noPassthrough/collection_catalog_two_phase_drops.js
+++ b/jstests/noPassthrough/collection_catalog_two_phase_drops.js
@@ -8,11 +8,7 @@
* requires_wiredtiger
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/disk/libs/wt_file_helper.js");
-load("jstests/libs/feature_flag_util.js");
+import {getUriForColl, getUriForIndex} from "jstests/disk/libs/wt_file_helper.js";
const rst = new ReplSetTest({
nodes: 1,
@@ -32,12 +28,6 @@ const primary = rst.getPrimary();
const dbName = "test";
const db = primary.getDB(dbName);
-if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- jsTestLog("Skipping as featureFlagPointInTimeCatalogLookups is not enabled");
- rst.stopSet();
- return;
-}
-
// Pause the checkpoint thread to control the checkpoint timestamp.
assert.commandWorked(
primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "alwaysOn"}));
@@ -83,8 +73,8 @@ checkLog.containsJson(primary, 6825301, {
assert.commandWorked(db.adminCommand({appendOplogNote: 1, data: {msg: "advance timestamp"}}));
assert.commandWorked(db.adminCommand({fsync: 1}));
-// Completing drop for ident.
-checkLog.containsJson(primary, 22237, {
+// "The ident was successfully dropped".
+checkLog.containsJson(primary, 6776600, {
ident: function(ident) {
return ident == xIndexUri;
}
@@ -133,13 +123,13 @@ checkLog.containsJson(primary, 6825300, {
assert.commandWorked(db.adminCommand({appendOplogNote: 1, data: {msg: "advance timestamp"}}));
assert.commandWorked(db.adminCommand({fsync: 1}));
-// Completing drop for ident.
-checkLog.containsJson(primary, 22237, {
+// "The ident was successfully dropped".
+checkLog.containsJson(primary, 6776600, {
ident: function(ident) {
return ident == collUri;
}
});
-checkLog.containsJson(primary, 22237, {
+checkLog.containsJson(primary, 6776600, {
ident: function(ident) {
return ident == idIndexUri;
}
@@ -160,5 +150,4 @@ checkLog.containsJson(primary, 6825302, {
assert.commandWorked(
primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "off"}));
-rst.stopSet();
-}());
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/collection_scan_low_priority.js b/jstests/noPassthrough/collection_scan_low_priority.js
index 192cc0046006d..1270eb321bb37 100644
--- a/jstests/noPassthrough/collection_scan_low_priority.js
+++ b/jstests/noPassthrough/collection_scan_low_priority.js
@@ -2,6 +2,8 @@
* Tests that unbounded collections scans access the storage engine with low priority.
*
* @tags: [
+ * cqf_incompatible, # TODO SERVER-64007: This test requires plans which yield in order to count
+ * # low-priority transactions, which CQF cannot generate until this ticket is complete.
* requires_wiredtiger,
* ]
*/
@@ -21,10 +23,10 @@ const coll = db.coll;
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
-const runTest = function(options) {
+const runTest = function(options, deprioritize) {
assert.commandWorked(db.createCollection(coll.getName(), options));
- assert.commandWorked(coll.insert({_id: 0}));
- assert.commandWorked(coll.insert({_id: 1}));
+ assert.commandWorked(coll.insert({_id: 0, class: 0}));
+ assert.commandWorked(coll.insert({_id: 1, class: 0}));
const numLowPriority = function() {
return db.serverStatus()
@@ -34,7 +36,11 @@ const runTest = function(options) {
const testScanDeprioritized = function(direction) {
const numLowPriorityBefore = numLowPriority();
coll.find().hint({$natural: direction}).itcount();
- assert.gt(numLowPriority(), numLowPriorityBefore);
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
};
testScanDeprioritized(1);
testScanDeprioritized(-1);
@@ -42,7 +48,11 @@ const runTest = function(options) {
const testScanSortLimitDeprioritized = function(direction) {
const numLowPriorityBefore = numLowPriority();
coll.find().hint({$natural: direction}).sort({_id: 1}).limit(1).itcount();
- assert.gt(numLowPriority(), numLowPriorityBefore);
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
};
testScanSortLimitDeprioritized(1);
testScanSortLimitDeprioritized(-1);
@@ -55,11 +65,45 @@ const runTest = function(options) {
testScanLimitNotDeprioritized(1);
testScanLimitNotDeprioritized(-1);
+ const testAggregationInducedScanDeprioritized = function() {
+ assert.commandWorked(coll.insert({_id: 3, class: 1}));
+ assert.commandWorked(coll.insert({_id: 4, class: 1}));
+ let numLowPriorityBefore = numLowPriority();
+ coll.aggregate(
+ [{
+ $group: {_id: "$class", idSum: {$count: {}}},
+ }],
+ );
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
+
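+        // A $match-only pipeline with no usable index still requires a full collection scan, so
+        // it should be subject to the same deprioritization.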
+ numLowPriorityBefore = numLowPriority();
+ coll.aggregate(
+ [{
+ $match: {class: 0},
+
+ }],
+ );
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
+ };
+ testAggregationInducedScanDeprioritized();
assert(coll.drop());
};
-runTest({});
-runTest({clusteredIndex: {key: {_id: 1}, unique: true}});
+runTest({}, true);
+runTest({clusteredIndex: {key: {_id: 1}, unique: true}}, true);
+
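+// With deprioritization disabled via the server parameter, the same scans should no longer be
+// counted as low-priority.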
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, deprioritizeUnboundedUserCollectionScans: false}));
+runTest({}, false);
+runTest({clusteredIndex: {key: {_id: 1}, unique: true}}, false);
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/collmod_ttl.js b/jstests/noPassthrough/collmod_ttl.js
index 9464ec3284c15..4ae97167282e8 100644
--- a/jstests/noPassthrough/collmod_ttl.js
+++ b/jstests/noPassthrough/collmod_ttl.js
@@ -8,10 +8,7 @@
* requires_ttl_index,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Runs TTL monitor constantly to speed up this test.
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
@@ -42,5 +39,4 @@ assert.commandWorked(testDB.runCommand({
TTLUtil.waitForPass(testDB);
assert.eq(0, coll.find().itcount());
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/column_scan_slow_logs.js b/jstests/noPassthrough/column_scan_slow_logs.js
index b4fda3d42000f..806dcec090764 100644
--- a/jstests/noPassthrough/column_scan_slow_logs.js
+++ b/jstests/noPassthrough/column_scan_slow_logs.js
@@ -5,10 +5,7 @@
* featureFlagColumnstoreIndexes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const conn = MongoRunner.runMongod({});
assert.neq(null, conn, "mongod was unable to start up");
@@ -18,7 +15,7 @@ assert.commandWorked(db.dropDatabase());
if (!setUpServerForColumnStoreIndexTest(db)) {
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
const coll = db.collection;
@@ -65,5 +62,4 @@ assert.eq(planSummary.match(/'_id'/g).length,
1,
`'_id' should appear once in planSummary. Instead, got: ${planSummary}`);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/column_store_index_load.js b/jstests/noPassthrough/column_store_index_load.js
index d73d2f97a0614..dd7ab8e0887b8 100644
--- a/jstests/noPassthrough/column_store_index_load.js
+++ b/jstests/noPassthrough/column_store_index_load.js
@@ -1,4 +1,3 @@
-
/**
* Test that different methods of loading a column store index all produce the same valid results.
* Indexes are validated by comparing query results that use the index with results from a control
@@ -10,18 +9,15 @@
* featureFlagColumnstoreIndexes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {planHasStage} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const mongod = MongoRunner.runMongod({});
const db = mongod.getDB("test");
if (!setUpServerForColumnStoreIndexTest(db)) {
MongoRunner.stopMongod(mongod);
- return;
+ quit();
}
//
@@ -242,4 +238,3 @@ for (let i = 0; i < noIndexResults.length; ++i) {
}
MongoRunner.stopMongod(mongod);
-})();
diff --git a/jstests/noPassthrough/columnstore_index_persistence.js b/jstests/noPassthrough/columnstore_index_persistence.js
index e8e587ee280fc..db0f04def75cb 100644
--- a/jstests/noPassthrough/columnstore_index_persistence.js
+++ b/jstests/noPassthrough/columnstore_index_persistence.js
@@ -11,11 +11,8 @@
* ]
*/
-(function() {
-'use strict';
-
load('jstests/libs/index_catalog_helpers.js');
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
@@ -28,7 +25,7 @@ let db_primary = primary.getDB('test');
if (!setUpServerForColumnStoreIndexTest(db_primary)) {
rst.stopSet();
- return;
+ quit();
}
let coll_primary = db_primary.getCollection(collName);
@@ -73,5 +70,4 @@ coll_primary = db_primary.getCollection(collName);
assert.neq(null, IndexCatalogHelpers.findByKeyPattern(indexList, {"$**": "columnstore"}));
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/columnstore_index_rowstore_settings.js b/jstests/noPassthrough/columnstore_index_rowstore_settings.js
index 22a2475cdc05c..adf568e8f17a3 100644
--- a/jstests/noPassthrough/columnstore_index_rowstore_settings.js
+++ b/jstests/noPassthrough/columnstore_index_rowstore_settings.js
@@ -8,17 +8,14 @@
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const mongod = MongoRunner.runMongod({});
const db = mongod.getDB("test");
if (!setUpServerForColumnStoreIndexTest(db)) {
MongoRunner.stopMongod(mongod);
- return;
+ quit();
}
const coll = db.columnstore_index_rowstore_settings;
@@ -245,5 +242,4 @@ function getRowstoreStats(explainExec) {
assert.eq(stats.fetches, count / 100, "Expected number of fetches. " + tojson(explain));
})();
-MongoRunner.stopMongod(mongod);
-}());
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index 6f9a023c014fd..70a8e14f7f18e 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -1,7 +1,7 @@
// Tests that commands properly handle their underlying plan executor failing or being killed.
// @tags: [
-// # TODO SERVER-64007: Support yielding in CQF plans.
-// cqf_incompatible,
+// # TODO SERVER-70446: Enable yielding for index plans in CQF.
+// cqf_experimental_incompatible,
// ]
(function() {
'use strict';
@@ -212,7 +212,7 @@ assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a:
{usesIndex: true});
assertCommandPropogatesPlanExecutorKillReason(
- {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
+ {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}, multi: true}]},
{curOpFilter: {op: 'update'}, usesIndex: true});
assertCommandPropogatesPlanExecutorKillReason(
diff --git a/jstests/noPassthrough/comment_field_passthrough.js b/jstests/noPassthrough/comment_field_passthrough.js
index 0f6e5a1b236b8..f5ad24dfe9d1d 100644
--- a/jstests/noPassthrough/comment_field_passthrough.js
+++ b/jstests/noPassthrough/comment_field_passthrough.js
@@ -8,10 +8,6 @@
* ]
*/
-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
import {authCommandsLib} from "jstests/auth/lib/commands_lib.js";
load("jstests/libs/fail_point_util.js"); // Helper to enable/disable failpoints easily.
@@ -25,7 +21,8 @@ const denylistedTests = [
"addShardToZone",
"removeShardFromZone",
"oidcListKeys",
- "oidcRefreshKeys"
+ "oidcRefreshKeys",
+ "aggregate_$search" // TODO SERVER-76087 reenable this test
];
function runTests(tests, conn, impls, options) {
diff --git a/jstests/noPassthrough/cqf_fallback.js b/jstests/noPassthrough/cqf_fallback.js
index 0cc62da363e94..d4a79595d2c78 100644
--- a/jstests/noPassthrough/cqf_fallback.js
+++ b/jstests/noPassthrough/cqf_fallback.js
@@ -2,11 +2,7 @@
* Verify that expressions and operators are correctly routed to CQF where eligible. This decision
* is based on several factors including the query text, collection metadata, etc..
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/optimizer_utils.js");
+import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js";
let conn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}});
assert.neq(null, conn, "mongod was unable to start up");
@@ -20,7 +16,7 @@ if (assert.commandWorked(db.adminCommand({getParameter: 1, internalQueryFramewor
.internalQueryFrameworkControl == "forceClassicEngine") {
jsTestLog("Skipping test due to forceClassicEngine");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(
@@ -36,6 +32,28 @@ function assertSupportedByBonsaiFully(cmd) {
assert.commandWorked(db.runCommand(cmd));
}
+function assertSupportedByBonsaiExperimentally(cmd) {
+ // Experimental features require the knob to be set to "tryBonsaiExperimental" or higher.
+ // With "tryBonsai", these features should not use the new optimizer.
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ const defaultExplain = assert.commandWorked(db.runCommand({explain: cmd}));
+ assert(!usedBonsaiOptimizer(defaultExplain), tojson(defaultExplain));
+
+    // Non-explain should also work and use the fallback mechanism, but we cannot verify this
+    // exactly without looking at the logs.
+ assert.commandWorked(db.runCommand(cmd));
+
+ // Enable "experimental" features in bonsai and expect the query to use Bonsai and pass.
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
+ const explain = assert.commandWorked(db.runCommand({explain: cmd}));
+ assert(usedBonsaiOptimizer(explain), tojson(explain));
+
+ // Non-explain should still work.
+ assert.commandWorked(db.runCommand(cmd));
+}
+
function assertNotSupportedByBonsai(cmd, testOnly, database = db) {
// An unsupported stage should not use the new optimizer.
assert.commandWorked(
@@ -93,6 +111,64 @@ assertNotSupportedByBonsai({find: coll.getName(), filter: {$alwaysFalse: 1}}, tr
assertNotSupportedByBonsai(
{aggregate: coll.getName(), pipeline: [{$match: {$alwaysFalse: 1}}], cursor: {}}, true);
+// Test $match on _id; these have only experimental support.
+assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {_id: 1}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}});
+assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {_id: {$lt: 10}}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$match: {_id: {$lt: 10}}}], cursor: {}});
+assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {'_id.a': 1}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$match: {'_id.a': 1}}], cursor: {}});
+assertSupportedByBonsaiExperimentally(
+ {find: coll.getName(), filter: {$and: [{a: 10}, {_id: {$gte: 5}}]}});
+assertSupportedByBonsaiExperimentally({
+ aggregate: coll.getName(),
+ pipeline: [{$match: {$and: [{a: 10}, {_id: {$gte: 5}}]}}],
+ cursor: {}
+});
+
+// Test $project on _id. These are fully supported in bonsai unless the _id index is specifically
+// hinted, which is only experimentally supported.
+assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, projection: {_id: 1}});
+assertSupportedByBonsaiFully(
+ {aggregate: coll.getName(), pipeline: [{$project: {_id: 1}}], cursor: {}});
+assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, projection: {_id: 1, a: 1}});
+assertSupportedByBonsaiFully(
+ {aggregate: coll.getName(), pipeline: [{$project: {_id: 1, a: 1}}], cursor: {}});
+
+assertSupportedByBonsaiExperimentally(
+ {find: coll.getName(), filter: {}, projection: {_id: 1}, hint: {_id: 1}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$project: {_id: 1}}], cursor: {}, hint: {_id: 1}});
+assertSupportedByBonsaiExperimentally(
+ {find: coll.getName(), filter: {}, projection: {_id: 1, a: 1}, hint: {_id: 1}});
+assertSupportedByBonsaiExperimentally({
+ aggregate: coll.getName(),
+ pipeline: [{$project: {_id: 1, a: 1}}],
+ cursor: {},
+ hint: {_id: 1}
+});
+
+// $natural hints are fully supported in Bonsai...
+assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, hint: {$natural: 1}});
+assertSupportedByBonsaiFully(
+ {aggregate: coll.getName(), pipeline: [], cursor: {}, hint: {$natural: 1}});
+assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, hint: {$natural: -1}});
+assertSupportedByBonsaiFully(
+ {aggregate: coll.getName(), pipeline: [], cursor: {}, hint: {$natural: -1}});
+
+// ... Except if the query relies on some experimental feature (e.g., predicate on _id).
+assertSupportedByBonsaiExperimentally(
+ {find: coll.getName(), filter: {_id: 1}, hint: {$natural: 1}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}, hint: {$natural: 1}});
+assertSupportedByBonsaiExperimentally(
+ {find: coll.getName(), filter: {_id: 1}, hint: {$natural: -1}});
+assertSupportedByBonsaiExperimentally(
+ {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}, hint: {$natural: -1}});
+
// Unsupported projection expression.
assertNotSupportedByBonsai(
{find: coll.getName(), filter: {}, projection: {a: {$concatArrays: [["$b"], ["suppported"]]}}},
@@ -268,11 +344,11 @@ assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}}));
assertNotSupportedByBonsai({find: coll.getName(), filter: {}});
assertNotSupportedByBonsai({aggregate: coll.getName(), pipeline: [], cursor: {}});
-// A simple collation on an index should be eligible for CQF.
+// A simple collation on an index should only have experimental support in CQF.
coll.drop();
assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "simple"}}));
-assertSupportedByBonsaiFully({find: coll.getName(), filter: {}});
-assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}});
+assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {}});
+assertSupportedByBonsaiExperimentally({aggregate: coll.getName(), pipeline: [], cursor: {}});
// A query against a collection with a hidden index should be eligible for CQF.
coll.drop();
@@ -280,10 +356,10 @@ assert.commandWorked(coll.createIndex({a: 1}, {hidden: true}));
assertSupportedByBonsaiFully({find: coll.getName(), filter: {}});
assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}});
-// Unhiding the supported index means the query is still eligible for CQF.
+// Unhiding the index means the query only has experimental support in CQF once again.
coll.unhideIndex({a: 1});
-assertSupportedByBonsaiFully({find: coll.getName(), filter: {}});
-assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}});
+assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {}});
+assertSupportedByBonsaiExperimentally({aggregate: coll.getName(), pipeline: [], cursor: {}});
// A query against a collection with a hidden index should be eligible for CQF even if the
// underlying index is not supported.
@@ -421,6 +497,9 @@ db = conn.getDB("test");
coll = db[jsTestName()];
coll.drop();
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'enableExplainInBonsai', 'mode': 'alwaysOn'}));
+
const supportedExpression = {
a: {$eq: 4}
};
@@ -431,30 +510,46 @@ assert(!usedBonsaiOptimizer(explain), tojson(explain));
explain = coll.explain().aggregate([{$match: supportedExpression}]);
assert(!usedBonsaiOptimizer(explain), tojson(explain));
-// Show that trying to set the framework to tryBonsai or forceBonsai is not permitted when the
-// feature flag is off.
+// Show that trying to set the framework to tryBonsai is not permitted when the feature flag is off,
+// but tryBonsaiExperimental and forceBonsai are allowed (since test commands are enabled here by
+// default).
assert.commandFailed(
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
explain = coll.explain().find(supportedExpression).finish();
assert(!usedBonsaiOptimizer(explain), tojson(explain));
-assert.commandFailed(
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
+explain = coll.explain().find(supportedExpression).finish();
+assert(usedBonsaiOptimizer(explain), tojson(explain));
+
+assert.commandWorked(
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"}));
explain = coll.explain().find(supportedExpression).finish();
-assert(!usedBonsaiOptimizer(explain), tojson(explain));
+assert(usedBonsaiOptimizer(explain), tojson(explain));
MongoRunner.stopMongod(conn);
-// Show that we can't start a mongod with the framework control set to tryBonsai or forceBonsai
+// Show that we can't start a mongod with the framework control set to tryBonsaiExperimental when
+// test commands are off.
+TestData.enableTestCommands = false;
+try {
+ conn = MongoRunner.runMongod(
+ {setParameter: {internalQueryFrameworkControl: "tryBonsaiExperimental"}});
+ MongoRunner.stopMongod(conn);
+ assert(false, "MongoD was able to start up when it should have failed");
+} catch (_) {
+ // This is expected.
+}
+
+// Show that we can't start a mongod with the framework control set to tryBonsai
// when the feature flag is off.
TestData.setParameters.featureFlagCommonQueryFramework = false;
-let mongodStarted = false;
+TestData.enableTestCommands = true;
try {
conn = MongoRunner.runMongod({setParameter: {internalQueryFrameworkControl: "tryBonsai"}});
MongoRunner.stopMongod(conn);
- mongodStarted = true;
+ assert(false, "MongoD was able to start up when it should have failed");
} catch (_) {
// This is expected.
}
-assert(!mongodStarted, "MongoD was able to start up when it should have failed");
-}());
diff --git a/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js b/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js
new file mode 100644
index 0000000000000..020b090156237
--- /dev/null
+++ b/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js
@@ -0,0 +1,35 @@
+/**
+ * Ensures that a createIndexes command request fails when the available disk space is below the
+ * indexBuildMinAvailableDiskSpaceMB threshold.
+ * @tags: [
+ * requires_fcv_71,
+ * requires_replication,
+ * ]
+ */
+
+(function() {
+"use strict";
+
+load('jstests/libs/fail_point_util.js');
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB('test');
+const primaryColl = primaryDB.getCollection('test');
+
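+// Simulate available disk space below the indexBuildMinAvailableDiskSpaceMB threshold so that new
+// index builds are refused before they start.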
+const simulateDiskSpaceFp =
+ configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+// Creating an index on an empty collection does not start an index build, so it should succeed.
+assert.commandWorked(primaryColl.createIndex({b: 1}));
+
+// Populate collection.
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+// Index build should fail to start.
+assert.commandFailedWithCode(primaryColl.createIndex({a: 1}), [ErrorCodes.OutOfDiskSpace]);
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js
index eb80ef4746308..1bf018b9101a5 100644
--- a/jstests/noPassthrough/crud_timestamps.js
+++ b/jstests/noPassthrough/crud_timestamps.js
@@ -16,18 +16,10 @@ rst.initiate();
const testDB = rst.getPrimary().getDB(dbName);
const coll = testDB.getCollection(collName);
-// Determine whether deletes are batched.
-const ret = rst.getPrimary().adminCommand({getParameter: 1, featureFlagBatchMultiDeletes: 1});
-assert(ret.ok || (!ret.ok && ret.errmsg === "no option found to get"));
-const batchedDeletesEnabled = ret.ok ? ret.featureFlagBatchMultiDeletes.value : false;
-if (batchedDeletesEnabled) {
- // For consistent results, generate a single delete (applyOps) batch.
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
- assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0}));
-}
+// For consistent results, generate a single delete (applyOps) batch.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0}));
+assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0}));
+assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0}));
if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
rst.stopSet();
@@ -112,21 +104,9 @@ request = {
assert.commandWorked(coll.runCommand(request));
-if (batchedDeletesEnabled) {
- const applyOps = oplog.findOne({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'});
- const ts = applyOps['ts'];
- check(ts, []);
-} else {
- ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
- ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
- const ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
- const ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
-
- check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts3, [{_id: 4, a: 5}]);
- check(ts4, []);
-}
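+// With batched deletes, the documents are removed in a single applyOps batch; reading at its
+// timestamp should therefore show an empty collection.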
+const applyOps = oplog.findOne({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'});
+const ts = applyOps['ts'];
+check(ts, []);
session.endSession();
rst.stopSet();
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index a3244f663437f..88b6b58e87aed 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -6,10 +6,7 @@
* requires_sharding,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// This test runs manual getMores using different connections, which will not inherit the
// implicit session of the cursor establishing command.
@@ -619,5 +616,4 @@ for (let connType of [rsConn, mongosConn]) {
}
}
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/currentop_target_all_nodes.js b/jstests/noPassthrough/currentop_target_all_nodes.js
new file mode 100644
index 0000000000000..5a8168e34b074
--- /dev/null
+++ b/jstests/noPassthrough/currentop_target_all_nodes.js
@@ -0,0 +1,32 @@
+// Tests that the $currentOp aggregation stage works as expected when run with the targetAllNodes
+// option turned on and off.
+//
+// @tags: [
+// requires_fcv_71,
+// ]
+(function() {
+"use strict";
+
+const shardCount = 2;
+const rsNodesPerShardCount = 2;
+const st = new ShardingTest({shards: shardCount, rs: {nodes: rsNodesPerShardCount}});
+const clusterAdminDB = st.s.getDB("admin");
+
+function runCurrentOpAgg(shouldTargetAllNodes) {
+ return clusterAdminDB.aggregate(
+ [
+ {$currentOp: {targetAllNodes: shouldTargetAllNodes}},
+ {$match: {"command.comment": "issuing a currentOp with targetAllNodes"}}
+ ],
+ {comment: "issuing a currentOp with targetAllNodes"});
+}
+
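+// With targetAllNodes off, only one node per shard reports the operation, so we expect one entry
+// per shard; with it on, every replica set node in the cluster reports it.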
+const targetAllNodesFalse = runCurrentOpAgg(false);
+assert.eq(shardCount, targetAllNodesFalse.itcount(), tojson(targetAllNodesFalse));
+
+const targetAllNodesTrue = runCurrentOpAgg(true);
+assert.eq(
+ shardCount * rsNodesPerShardCount, targetAllNodesTrue.itcount(), tojson(targetAllNodesTrue));
+
+st.stop();
+}());
diff --git a/jstests/noPassthrough/dbcheck_detects_data_corruption.js b/jstests/noPassthrough/dbcheck_detects_data_corruption.js
new file mode 100644
index 0000000000000..616f07aae47ac
--- /dev/null
+++ b/jstests/noPassthrough/dbcheck_detects_data_corruption.js
@@ -0,0 +1,63 @@
+/**
+ * Tests that dbCheck logs errors when it finds evidence of corruption, and that the corruption
+ * does not cause the dbCheck operation itself to fail.
+ */
+(function() {
+
+const replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
+
+const primary = replSet.getPrimary();
+const secondary = replSet.getSecondary();
+
+const db = primary.getDB('test');
+const collName = 'coll';
+const coll = db[collName];
+
+assert.commandWorked(coll.insert({_id: 0, a: "first"}));
+
+// Create the same type of corruption on both nodes.
+assert.commandWorked(db.adminCommand({
+ configureFailPoint: "skipUnindexingDocumentWhenDeleted",
+ mode: "alwaysOn",
+ data: {indexName: "_id_"}
+}));
+assert.commandWorked(secondary.getDB('admin').runCommand({
+ configureFailPoint: "skipUnindexingDocumentWhenDeleted",
+ mode: "alwaysOn",
+ data: {indexName: "_id_"}
+}));
+
+const docId = 1;
+assert.commandWorked(coll.insert({_id: docId, a: "second"}));
+assert.commandWorked(coll.remove({_id: docId}));
+
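+// Because unindexing was skipped on delete, the _id index now contains a key that points to a
+// non-existent record, which is the corruption that validate and dbCheck should detect.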
+// Validate should detect this inconsistency.
+let res = coll.validate();
+assert.commandWorked(res);
+assert(!res.valid, res);
+
+assert.commandWorked(db.runCommand({"dbCheck": 1}));
+
+// Wait for both nodes to finish checking.
+[primary, secondary].forEach((node) => {
+ print("waiting for node to finish: " + tojson(node));
+ const healthlog = node.getDB('local').system.healthlog;
+ assert.soon(() => healthlog.find({operation: "dbCheckStop"}).itcount() == 1);
+});
+
+[primary, secondary].forEach((node) => {
+ print("checking " + tojson(node));
+ let entry = node.getDB('local').system.healthlog.findOne({severity: 'error'});
+ assert(entry, "No healthlog entry found on " + tojson(node));
+ assert.eq("Erroneous index key found with reference to non-existent record id",
+ entry.msg,
+ tojson(entry));
+
+ // The erroneous index key should not affect the hashes. The documents should still be the same.
+ assert.eq(1, node.getDB('local').system.healthlog.count({severity: 'error'}));
+});
+
+replSet.stopSet(undefined /* signal */, false /* forRestart */, {skipValidation: true});
+})();
diff --git a/jstests/noPassthrough/dbhash_before_ddl_op.js b/jstests/noPassthrough/dbhash_before_ddl_op.js
index 6b2e2337b9266..8ba3739a2a31d 100644
--- a/jstests/noPassthrough/dbhash_before_ddl_op.js
+++ b/jstests/noPassthrough/dbhash_before_ddl_op.js
@@ -1,7 +1,6 @@
/**
* Tests that dbHash does not throw SnapshotUnavailable when running earlier than the latest DDL
- * operation for a collection in the database. When the point-in-time catalog lookups feature flag
- * is disabled, SnapshotUnavailable is still thrown.
+ * operation for a collection in the database.
*
* @tags: [
* requires_replication,
@@ -10,8 +9,6 @@
(function() {
"use strict";
-load("jstests/libs/feature_flag_util.js");
-
const replTest = new ReplSetTest({nodes: 1});
replTest.startSet();
replTest.initiate();
@@ -36,47 +33,24 @@ jsTestLog("Last insert timestamp: " + tojson(insertTS));
const renameTS = assert.commandWorked(db[jsTestName()].renameCollection("renamed")).operationTime;
jsTestLog("Rename timestamp: " + tojson(renameTS));
-if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- // dbHash at all timestamps should work.
- let res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: createTS,
- }));
- assert(res.collections.hasOwnProperty(jsTestName()));
-
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: insertTS,
- }));
- assert(res.collections.hasOwnProperty(jsTestName()));
-
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: renameTS,
- }));
- assert(res.collections.hasOwnProperty("renamed"));
-} else {
- // dbHash at the 'createTS' should throw SnapshotUnavailable due to the rename.
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: createTS,
- }),
- ErrorCodes.SnapshotUnavailable);
-
- // dbHash at the 'insertTS' should throw SnapshotUnavailable due to the rename.
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: insertTS,
- }),
- ErrorCodes.SnapshotUnavailable);
-
- // dbHash at 'renameTS' should work.
- let res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: renameTS,
- }));
- assert(res.collections.hasOwnProperty("renamed"));
-}
+// dbHash at all timestamps should work.
+let res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: createTS,
+}));
+assert(res.collections.hasOwnProperty(jsTestName()));
+
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: insertTS,
+}));
+assert(res.collections.hasOwnProperty(jsTestName()));
+
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: renameTS,
+}));
+assert(res.collections.hasOwnProperty("renamed"));
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/dbstats_sharded_collection.js b/jstests/noPassthrough/dbstats_sharded_collection.js
deleted file mode 100644
index a2ab3dde627cf..0000000000000
--- a/jstests/noPassthrough/dbstats_sharded_collection.js
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Tests that the dbStats command properly computes the stats by comparing the results from a
- * sharded cluster to the summation of querying the mongod's directly.
- *
- * @tags: [requires_dbstats]
- */
-
-(function() {
-"use strict";
-
-// Set up cluster with 2 shards, insert a batch of documents, and configure the cluster so both
-// shards have documents.
-const st = new ShardingTest({shards: 2, mongos: 1});
-const dbName = "db";
-const db = st.getDB(dbName);
-const collName = "foo";
-const ns = dbName + "." + collName;
-const numDocs = 100;
-
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-
-let primaryShard = st.getPrimaryShard(dbName);
-let secondaryShard = st.getOther(primaryShard);
-
-let bulk = primaryShard.getCollection(ns).initializeUnorderedBulkOp();
-for (let i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, x: i, y: -i});
-}
-assert.commandWorked(bulk.execute());
-assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: numDocs / 2}}));
-assert.commandWorked(st.s.adminCommand(
- {moveChunk: ns, find: {_id: 0}, to: secondaryShard.name, _waitForDelete: true}));
-
-const scale = 1024 * 1024;
-let dbStats = db.runCommand({dbStats: 1, scale: scale});
-assert.commandWorked(dbStats);
-jsTestLog('dbStats result on mongos: ' + tojson(dbStats));
-let shard0Stats = primaryShard.getDB(dbName).runCommand({dbStats: 1, scale: scale});
-assert.commandWorked(shard0Stats);
-jsTestLog('dbStats result on primary shard ' + primaryShard.host + ': ' + tojson(shard0Stats));
-let shard1Stats = secondaryShard.getDB(dbName).runCommand({dbStats: 1, scale: scale});
-assert.commandWorked(shard1Stats);
-jsTestLog('dbStats result on secondary shard ' + secondaryShard.host + ': ' + tojson(shard1Stats));
-
-// Compare each of the relevant fields in dbStats to make sure the individual shards' responses sum
-// to the overall cluster's value.
-let total = shard0Stats.collections + shard1Stats.collections;
-assert.eq(dbStats.collections,
- total,
- "Sharded collection dbStats returned " + dbStats.collections +
- " collections total, but sum of individual shards' responses returned " + total +
- " collections total");
-
-total = shard0Stats.views + shard1Stats.views;
-assert.eq(dbStats.views,
- total,
- "Sharded collection dbStats returned " + dbStats.views +
- " views total, but sum of individual shards' responses returned " + total +
- " views total");
-
-total = shard0Stats.objects + shard1Stats.objects;
-assert.eq(dbStats.objects,
- total,
- "Sharded collection dbStats returned " + dbStats.objects +
- " objects total, but sum of individual shards' responses returned " + total +
- " objects total");
-
-total = shard0Stats.dataSize + shard1Stats.dataSize;
-assert.eq(dbStats.dataSize,
- total,
- "Sharded collection dbStats returned " + dbStats.dataSize +
- " dataSize total, but sum of individual shards' responses returned " + total +
- " dataSize total");
-
-total = shard0Stats.storageSize + shard1Stats.storageSize;
-assert.eq(dbStats.storageSize,
- total,
- "Sharded collection dbStats returned " + dbStats.storageSize +
- " storageSize total, but sum of individual shards' responses returned " + total +
- " storageSize total");
-
-total = shard0Stats.indexes + shard1Stats.indexes;
-assert.eq(dbStats.indexes,
- total,
- "Sharded collection dbStats returned " + dbStats.indexes +
- " indexes total, but sum of individual shards' responses returned " + total +
- " indexes total");
-
-total = shard0Stats.indexSize + shard1Stats.indexSize;
-assert.eq(dbStats.indexSize,
- total,
- "Sharded collection dbStats returned " + dbStats.indexSize +
- " indexSize total, but sum of individual shards' responses returned " + total +
- " indexSize total");
-
-total = shard0Stats.totalSize + shard1Stats.totalSize;
-assert.eq(dbStats.totalSize,
- total,
- "Sharded collection dbStats returned " + dbStats.totalSize +
- " totalSize total, but sum of individual shards' responses returned " + total +
- " totalSize total");
-
-st.stop();
-})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/dedicated_to_catalog_shard.js b/jstests/noPassthrough/dedicated_to_catalog_shard.js
index 94107ac52ac22..4cd77cb921d49 100644
--- a/jstests/noPassthrough/dedicated_to_catalog_shard.js
+++ b/jstests/noPassthrough/dedicated_to_catalog_shard.js
@@ -1,17 +1,12 @@
/**
- * Tests catalog shard topology.
+ * Tests config shard topology.
*
* @tags: [
* requires_fcv_70,
- * featureFlagCatalogShard,
* featureFlagTransitionToCatalogShard,
* ]
*/
-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
(function() {
"use strict";
@@ -52,17 +47,17 @@ const configCS = st.configRS.getURL();
}
//
-// Catalog shard mode tests (post addShard).
+// Config shard mode tests (post addShard).
//
{
//
// Adding the config server as a shard works.
//
- assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
+ assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}));
// More than once works.
- assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
- assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
+ assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}));
+ assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}));
// Flushing routing / db cache updates works.
flushRoutingAndDBCacheUpdates(st.configRS.getPrimary());
diff --git a/jstests/noPassthrough/devnull.js b/jstests/noPassthrough/devnull.js
index 5d3fa5e1c5f75..103b49f4b5691 100644
--- a/jstests/noPassthrough/devnull.js
+++ b/jstests/noPassthrough/devnull.js
@@ -10,7 +10,7 @@ assert(logContents.indexOf("enableMajorityReadConcern:false is no longer support
const emrcDefaultConn = MongoRunner.runMongod({storageEngine: "devnull"});
db = emrcDefaultConn.getDB("test");
-res = db.foo.insert({x: 1});
+let res = db.foo.insert({x: 1});
assert.eq(1, res.nInserted, tojson(res));
// Skip collection validation during stopMongod if invalid storage engine.
diff --git a/jstests/noPassthrough/disabled_cluster_server_parameters.js b/jstests/noPassthrough/disabled_cluster_server_parameters.js
index ca88787627e00..fa470b6ffa61c 100644
--- a/jstests/noPassthrough/disabled_cluster_server_parameters.js
+++ b/jstests/noPassthrough/disabled_cluster_server_parameters.js
@@ -5,13 +5,18 @@
* @tags: [
* does_not_support_stepdowns,
* requires_replication,
- * requires_sharding
+ * requires_sharding,
+ * # Tests running with experimental CQF behavior require test commands to be enabled.
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/cluster_server_parameter_utils.js');
+import {
+ setupNode,
+ setupReplicaSet,
+ setupSharded,
+ testDisabledClusterParameters
+} from "jstests/libs/cluster_server_parameter_utils.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
// Verifies that test-only parameters are disabled and excluded when enableTestCommands is false.
TestData.enableTestCommands = false;
@@ -54,4 +59,3 @@ setupSharded(st);
// Check that the same behavior for disabled cluster server parameters holds on sharded clusters.
testDisabledClusterParameters(st);
st.stop();
-}());
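
Many hunks in this change migrate shell tests from load()-based scripts to ES modules: the (function() { "use strict"; ... })() wrapper is dropped, only the helpers actually used are imported, and early returns become quit(). A minimal sketch of the resulting shape, assuming a hypothetical helper module that is not part of this patch:

    import {exampleHelper} from "jstests/libs/example_util.js";  // hypothetical helper

    const conn = MongoRunner.runMongod();
    assert.neq(null, conn, "mongod failed to start up");
    if (!exampleHelper(conn.getDB("test"))) {
        MongoRunner.stopMongod(conn);
        quit();  // replaces an early 'return' from the removed IIFE wrapper
    }
    MongoRunner.stopMongod(conn);
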
diff --git a/jstests/noPassthrough/disabled_test_parameters.js b/jstests/noPassthrough/disabled_test_parameters.js
index 0f71810db035f..3ac30bb5da2fc 100644
--- a/jstests/noPassthrough/disabled_test_parameters.js
+++ b/jstests/noPassthrough/disabled_test_parameters.js
@@ -1,4 +1,6 @@
// Test that test-only set parameters are disabled.
+// Tests running with experimental CQF behavior require test commands to be enabled.
+// @tags: [cqf_experimental_incompatible]
(function() {
'use strict';
diff --git a/jstests/noPassthrough/drop_config_db.js b/jstests/noPassthrough/drop_config_db.js
index c629d41d7a9ca..4f44b5a90990f 100644
--- a/jstests/noPassthrough/drop_config_db.js
+++ b/jstests/noPassthrough/drop_config_db.js
@@ -1,5 +1,7 @@
/*
* Test that dropping the config DB does not crash the server.
+ * Tests running with experimental CQF behavior require test commands to be enabled.
+ * @tags: [cqf_experimental_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
index 20d4b93049b18..962ebd55da0f1 100644
--- a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
+++ b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
@@ -37,4 +37,4 @@ if (storageEngineIsWiredTiger()) {
}
MongoRunner.stopMongod(primary);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/drop_pending_retry.js b/jstests/noPassthrough/drop_pending_retry.js
index f726345d405fb..e6d8254d887c2 100644
--- a/jstests/noPassthrough/drop_pending_retry.js
+++ b/jstests/noPassthrough/drop_pending_retry.js
@@ -7,10 +7,7 @@
* requires_wiredtiger
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/disk/libs/wt_file_helper.js");
+import {getUriForColl, getUriForIndex} from "jstests/disk/libs/wt_file_helper.js";
const rst = new ReplSetTest({
nodes: 1,
@@ -18,6 +15,7 @@ const rst = new ReplSetTest({
setParameter: {
// Set the history window to zero to explicitly control the oldest timestamp.
minSnapshotHistoryWindowInSeconds: 0,
+ logComponentVerbosity: tojson({storage: 1}),
}
}
});
@@ -51,20 +49,25 @@ assert.commandWorked(db.getCollection("toWrite").insert({x: 1}));
// Take a checkpoint to advance the checkpoint timestamp.
assert.commandWorked(db.adminCommand({fsync: 1}));
-// Tests that the table drops are retried each time the drop pending reaper runs until they succeed.
-// We wait for 5 retries here. 5 for the collection table and 5 for the index table.
-checkLog.containsWithAtLeastCount(primary, "Drop-pending ident is still in use", 2 * 5);
+// Tests that the table drops are retried when the drop pending reaper runs. Once for the collection
+// and once for the index.
+checkLog.containsWithAtLeastCount(primary, "Drop-pending ident is still in use", 2);
// Let the table drops succeed.
assert.commandWorked(primary.adminCommand({configureFailPoint: "WTDropEBUSY", mode: "off"}));
-// Completing drop for ident
-checkLog.containsJson(primary, 22237, {
+// Perform another write and another checkpoint to advance the checkpoint timestamp, triggering
+// the reaper.
+assert.commandWorked(db.getCollection("toWrite").insert({x: 1}));
+assert.commandWorked(db.adminCommand({fsync: 1}));
+
+// "The ident was successfully dropped".
+checkLog.containsJson(primary, 6776600, {
ident: function(ident) {
return ident == collUri;
}
});
-checkLog.containsJson(primary, 22237, {
+checkLog.containsJson(primary, 6776600, {
ident: function(ident) {
return ident == indexUri;
}
@@ -73,5 +76,4 @@ checkLog.containsJson(primary, 22237, {
assert.commandWorked(
primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "off"}));
-rst.stopSet();
-}());
\ No newline at end of file
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js b/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js
index f4e9728865b88..c3cb0aba1b24c 100644
--- a/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js
+++ b/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js
@@ -7,10 +7,6 @@
* requires_replication,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/disk/libs/wt_file_helper.js');
load('jstests/noPassthrough/libs/index_build.js');
const dbName = jsTestName();
@@ -70,5 +66,4 @@ jsTestLog("Dropping database from secondary");
assert.commandWorked(secondaryDB.dropDatabase());
MongoRunner.stopMongod(mongod);
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/durable_history_index_usage.js b/jstests/noPassthrough/durable_history_index_usage.js
index ae3be04fd14dd..d84ac707ba530 100644
--- a/jstests/noPassthrough/durable_history_index_usage.js
+++ b/jstests/noPassthrough/durable_history_index_usage.js
@@ -6,12 +6,8 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/feature_flag_util.js");
const replTest = new ReplSetTest({
nodes: 1,
@@ -68,9 +64,6 @@ const findWithIndex = function(atClusterTime, expectedErrCode) {
}
};
-const pointInTimeCatalogLookupsAreEnabled =
- FeatureFlagUtil.isEnabled(testDB(), "PointInTimeCatalogLookups");
-
const oldestTS = insert({a: 0});
jsTestLog("Oldest timestamp: " + tojson(oldestTS));
@@ -130,8 +123,7 @@ checkLogs();
// The index is being re-created.
-// When the PointInTimeCatalogLookups feature flag is enabled, it's possible to read prior to the
-// most recent DDL operation for the collection.
+// It's possible to read prior to the most recent DDL operation for the collection.
//
// At oldestTs, the index did not exist, so queries for the index at that timestamp will return
// BadValue.
@@ -144,16 +136,12 @@ checkLogs();
//
// Etc.
//
-// Generally speaking when the PointInTimeCatalogLookups feature flag is enabled, find queries
-// should all return the result one would expect based on the state of the catalog at that point in
-// time. When the feature flag is disabled, these find queries will instead return
+// Find queries should all return the result one would expect based on the state of the catalog at
+// that point in time, rather than returning
// SnapshotUnavailable.
-findWithIndex(
- oldestTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
-findWithIndex(createIndexTS,
- pointInTimeCatalogLookupsAreEnabled ? null : ErrorCodes.SnapshotUnavailable);
+findWithIndex(oldestTS, ErrorCodes.BadValue);
+findWithIndex(createIndexTS, null);
findWithIndex(preIndexCommitTS, ErrorCodes.BadValue);
findWithIndex(undefined, ErrorCodes.BadValue);
@@ -183,14 +171,9 @@ checkLog.containsJson(primary(), 20663, {
});
IndexBuildTest.assertIndexes(coll(), 2, ["_id_", "a_1"]);
-findWithIndex(
- oldestTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
-findWithIndex(createIndexTS,
- pointInTimeCatalogLookupsAreEnabled ? null : ErrorCodes.SnapshotUnavailable);
-findWithIndex(
- preIndexCommitTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
+findWithIndex(oldestTS, ErrorCodes.BadValue);
+findWithIndex(createIndexTS, null);
+findWithIndex(preIndexCommitTS, ErrorCodes.BadValue);
findWithIndex(restartInsertTS, ErrorCodes.BadValue);
assert.eq(3, findWithIndex(undefined)["cursor"]["firstBatch"].length);
@@ -208,44 +191,34 @@ const insertAfterRestartAfterIndexBuild = insert({a: 4});
assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length);
assert.eq(5, findWithIndex(undefined)["cursor"]["firstBatch"].length);
-findWithIndex(
- oldestTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
-findWithIndex(createIndexTS,
- pointInTimeCatalogLookupsAreEnabled ? null : ErrorCodes.SnapshotUnavailable);
-findWithIndex(
- preIndexCommitTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
-findWithIndex(
- restartInsertTS,
- pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable);
+findWithIndex(oldestTS, ErrorCodes.BadValue);
+findWithIndex(createIndexTS, null);
+findWithIndex(preIndexCommitTS, ErrorCodes.BadValue);
+findWithIndex(restartInsertTS, ErrorCodes.BadValue);
+
+assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length);
+
+// Drop the index and demonstrate the durable history can be used across a restart for reads with
+// times prior to the drop.
+const dropIndexTS = assert.commandWorked(coll().dropIndex(indexSpec)).operationTime;
+jsTestLog("Index drop timestamp: " + tojson(dropIndexTS));
+
+// Take a checkpoint to persist the new catalog entry of the index being rebuilt.
+assert.commandWorked(testDB().adminCommand({fsync: 1}));
+
+replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true});
+replTest.start(
+ 0,
+ {
+ setParameter: {
+ // To control durable history more predictably, disable the checkpoint thread.
+ syncdelay: 0
+ }
+ },
+ true /* restart */);
+// Test that we can read using the dropped index on timestamps before the drop
assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length);
+assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length);
-if (pointInTimeCatalogLookupsAreEnabled) {
- // Drop the index and demonstrate the durable history can be used across a restart for reads
- // with times prior to the drop.
- const dropIndexTS = assert.commandWorked(coll().dropIndex(indexSpec)).operationTime;
- jsTestLog("Index drop timestamp: " + tojson(dropIndexTS));
-
- // Take a checkpoint to persist the new catalog entry of the index being rebuilt.
- assert.commandWorked(testDB().adminCommand({fsync: 1}));
-
- replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true});
- replTest.start(
- 0,
- {
- setParameter: {
- // To control durable history more predictably, disable the checkpoint thread.
- syncdelay: 0
- }
- },
- true /* restart */);
-
- // Test that we can read using the dropped index on timestamps before the drop
- assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length);
- assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length);
-}
-
-replTest.stopSet();
-})();
+replTest.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js
index 65826603346b2..ca3c2eb7b35b0 100644
--- a/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js
+++ b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js
@@ -71,4 +71,4 @@ assert.gte(testColl.count(),
"Fast count should still be 100 + 1 after crash. Fast count: " + testColl.count());
MongoRunner.stopMongod(conn);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js b/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js
index 3fd8028b2e8bf..2ee2978d38463 100644
--- a/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js
+++ b/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js
@@ -1,10 +1,7 @@
// When running explain commands with "executionStats" verbosity, checks that the explain output
// includes "executionTimeMicros"/"executionTimeNanos" only if requested.
// "executionTimeMillisEstimate" will always be present in the explain output.
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAllPlanStages().
+import {getAllPlanStages} from "jstests/libs/analyze_plan.js";
let conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start up");
@@ -85,5 +82,4 @@ for (let executionStage of executionStages) {
}
}
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/explain_group_stage_exec_stats.js b/jstests/noPassthrough/explain_group_stage_exec_stats.js
index 8a8774f694a5a..4526992c036c5 100644
--- a/jstests/noPassthrough/explain_group_stage_exec_stats.js
+++ b/jstests/noPassthrough/explain_group_stage_exec_stats.js
@@ -2,11 +2,8 @@
* Tests that $group stage reports memory footprint per accumulator when explain is run with
* verbosities "executionStats" and "allPlansExecution".
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const testDB = conn.getDB('test');
@@ -19,7 +16,7 @@ if (checkSBEEnabled(testDB)) {
// spilling behavior of the classic DocumentSourceGroup stage.
jsTest.log("Skipping test since SBE $group pushdown has different memory tracking behavior");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
const bigStr = Array(1025).toString(); // 1KB of ','
@@ -150,4 +147,3 @@ groupStages = getAggPlanStage(
checkGroupStages(groupStages, {}, false, 0);
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/explain_output_truncation.js b/jstests/noPassthrough/explain_output_truncation.js
index 563bbaddbda2a..c7a89bc60f9e2 100644
--- a/jstests/noPassthrough/explain_output_truncation.js
+++ b/jstests/noPassthrough/explain_output_truncation.js
@@ -1,10 +1,7 @@
/**
* Test that explain output is correctly truncated when it grows too large.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js";
const dbName = "test";
const collName = jsTestName();
@@ -49,5 +46,4 @@ assert.eq(
fetchStage.inputStage.warning, "stats tree exceeded BSON size limit for explain", explain);
assert(!planHasStage(testDb, explain, "IXSCAN"), explain);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/explain_sort_exec_stats.js b/jstests/noPassthrough/explain_sort_exec_stats.js
index f21e82c9d28e6..de7fbb3f3c1f6 100644
--- a/jstests/noPassthrough/explain_sort_exec_stats.js
+++ b/jstests/noPassthrough/explain_sort_exec_stats.js
@@ -2,11 +2,8 @@
* Tests that $sort stage reports the correct stats when explain is run with
* different verbosities.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -90,4 +87,3 @@ pipelines.forEach(function(pipeline) {
});
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/explain_unionwith_lookup_sharded.js b/jstests/noPassthrough/explain_unionwith_lookup_sharded.js
index ef4b4a42bec66..4268143845ab5 100644
--- a/jstests/noPassthrough/explain_unionwith_lookup_sharded.js
+++ b/jstests/noPassthrough/explain_unionwith_lookup_sharded.js
@@ -4,10 +4,7 @@
*
* This test was originally designed to reproduce SERVER-71636.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js";
const dbName = "test";
@@ -212,5 +209,4 @@ stageExplain = getStageFromMergerPart(explain);
assert(stageExplain.hasOwnProperty("$unionWith"), explain);
assertStageDoesNotHaveRuntimeStats(stageExplain);
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/external_data_source.js b/jstests/noPassthrough/external_data_source.js
index b366fc7739063..5b24adbb939ed 100644
--- a/jstests/noPassthrough/external_data_source.js
+++ b/jstests/noPassthrough/external_data_source.js
@@ -7,10 +7,7 @@
* requires_external_data_source
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // for aggPlanHasStage()
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
// Runs tests on a standalone mongod.
let conn = MongoRunner.runMongod({setParameter: {enableComputeMode: true}});
@@ -160,6 +157,17 @@ assert.throwsWithCode(() => {
});
})();
+(function testCollectionlessAgg() {
+ const docs = [{a: 1}, {a: 2}, {a: 3}];
+ assert.sameMembers(docs, db.aggregate([{$documents: docs}]).toArray());
+})();
+
+(function testCollectionlessAggWithExternalDataSources() {
+ assert.throwsWithCode(() => {
+ db.aggregate([{$documents: [{a: 1}]}], {$_externalDataSources: []});
+ }, 7604400);
+})();
+
//
// Named Pipes success test cases follow.
//
@@ -491,5 +499,4 @@ if (hostInfo.os.type != "Windows") {
return !runningStatus.alive && runningStatus.exitCode != 0;
}, "Expected mongod died due to an error", 120 * 1000);
})();
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/noPassthrough/external_sort_find.js b/jstests/noPassthrough/external_sort_find.js
index a1505f129a392..0b5ad6f9d0682 100644
--- a/jstests/noPassthrough/external_sort_find.js
+++ b/jstests/noPassthrough/external_sort_find.js
@@ -1,11 +1,8 @@
/**
* Test that the find command can spill to disk while executing a blocking sort.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// Only allow blocking sort execution to use 100 kB of memory.
const kMaxMemoryUsageBytes = 100 * 1024;
@@ -167,4 +164,3 @@ assert.eq(aggregationExternalSortStatsForPipeline.spills,
aggregationExternalSortStatsForPipeline);
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/fle2_shardsvr_cleanup.js b/jstests/noPassthrough/fle2_shardsvr_cleanup.js
new file mode 100644
index 0000000000000..0d8b965dc2837
--- /dev/null
+++ b/jstests/noPassthrough/fle2_shardsvr_cleanup.js
@@ -0,0 +1,36 @@
+/**
+ * Cannot run cleanup against a shard server
+ *
+ * @tags: [
+ * requires_fcv_70,
+ * requires_sharding,
+ * featureFlagFLE2CleanupCommand
+ * ]
+ */
+import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js";
+
+function runTest(mongosConn, shardConn) {
+ let dbName = 'testdb';
+
+ let clientMongoS = new EncryptedClient(mongosConn, dbName);
+
+ assert.commandWorked(clientMongoS.createEncryptionCollection("basic", {
+ encryptedFields: {
+ "fields":
+ [{"path": "first", "bsonType": "string", "queries": {"queryType": "equality"}}]
+ }
+ }));
+
+ let clientShard = new EncryptedClient(shardConn, dbName);
+
+ assert.commandFailedWithCode(clientShard.getDB().basic.cleanup(), 7618804);
+}
+
+jsTestLog("Sharding: Testing fle2 cleanup not allowed against a shard server");
+{
+ const st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+
+ runTest(st.s, st.shard0);
+
+ st.stop();
+}
diff --git a/jstests/noPassthrough/fle2_shardsvr_compact.js b/jstests/noPassthrough/fle2_shardsvr_compact.js
index 7f5aa2cc5bf8b..16bffafd7d2c6 100644
--- a/jstests/noPassthrough/fle2_shardsvr_compact.js
+++ b/jstests/noPassthrough/fle2_shardsvr_compact.js
@@ -6,10 +6,7 @@
* requires_sharding
* ]
*/
-load("jstests/fle2/libs/encrypted_client_util.js");
-
-(function() {
-'use strict';
+import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js";
function runTest(mongosConn, shardConn) {
let dbName = 'testdb';
@@ -36,4 +33,3 @@ jsTestLog("Sharding: Testing fle2 drop collection warning");
st.stop();
}
-}());
diff --git a/jstests/noPassthrough/ftdc_connection_reuse.js b/jstests/noPassthrough/ftdc_connection_reuse.js
index 242afee00a077..4d490870d67d5 100644
--- a/jstests/noPassthrough/ftdc_connection_reuse.js
+++ b/jstests/noPassthrough/ftdc_connection_reuse.js
@@ -14,7 +14,7 @@ load("jstests/libs/parallelTester.js");
const ftdcPath = MongoRunner.toRealPath('ftdc');
const st = new ShardingTest({
- shards: 1,
+ shards: {rs0: {nodes: 1}},
mongos: {
s0: {setParameter: {diagnosticDataCollectionDirectoryPath: ftdcPath}},
}
@@ -27,11 +27,11 @@ const testDB = st.s.getDB(kDbName);
const coll = testDB.getCollection(kCollName);
function getDiagnosticData() {
+ let stats;
assert.soon(() => {
- let stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats;
+ stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats;
return stats["pools"].hasOwnProperty('NetworkInterfaceTL-TaskExecutorPool-0');
}, "Failed to load NetworkInterfaceTL-TaskExecutorPool-0 in FTDC within time limit");
- const stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats;
assert(stats.hasOwnProperty('totalWasUsedOnce'));
assert(stats.hasOwnProperty('totalConnUsageTimeMillis'));
return stats["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
@@ -74,8 +74,14 @@ function launchFinds({times, readPref, shouldFail}) {
function resetPools() {
const cfg = st.rs0.getPrimary().getDB('local').system.replset.findOne();
const allHosts = cfg.members.map(x => x.host);
-
assert.commandWorked(st.s.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+ // FTDC data is collected periodically. Check that the data returned reflects that the pools
+ // have been dropped before resuming testing.
+ assert.soon(() => {
+ const stats = getDiagnosticData();
+ // The shard has a single node in its replica set.
+ return !stats.hasOwnProperty(allHosts[0]);
+ }, "Failed to wait for pool stats to reflect dropped pools");
}
[1, 2, 3].forEach(v => assert.commandWorked(coll.insert({x: v})));
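
The reworked getDiagnosticData() above captures the stats object inside the assert.soon() predicate rather than re-fetching the diagnostic data after the wait, so later assertions operate on the exact snapshot that satisfied the condition. A minimal sketch of that polling pattern, using an unrelated serverStatus metric purely for illustration:

    let stats;
    assert.soon(() => {
        // Keep the polled value so it can be inspected after the wait succeeds.
        stats = db.serverStatus().connections;
        return stats.current > 0;
    }, "expected at least one open connection");
    jsTestLog("Connection stats at the time the condition passed: " + tojson(stats));
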
diff --git a/jstests/noPassthrough/ftdc_mirrored_reads.js b/jstests/noPassthrough/ftdc_mirrored_reads.js
index c1bb78e7dac8b..2c7eda4cb928d 100644
--- a/jstests/noPassthrough/ftdc_mirrored_reads.js
+++ b/jstests/noPassthrough/ftdc_mirrored_reads.js
@@ -16,26 +16,30 @@ const kCollName = "test";
const kOperations = 100;
const rst = new ReplSetTest({nodes: 3});
-rst.startSet();
+// Disable mirrored reads to make sure the oplog fetcher find commands issued by the secondaries
+// during initialization do not get included in the metrics that we are testing.
+rst.startSet({
+ setParameter: {
+ mirrorReads: tojsononeline({samplingRate: 0.0}),
+ logComponentVerbosity: tojson({command: 1})
+ }
+});
rst.initiateWithHighElectionTimeout();
const primary = rst.getPrimary();
const secondaries = rst.getSecondaries();
-function getMirroredReadsStats(node) {
- return node.getDB(kDbName).serverStatus({mirroredReads: 1}).mirroredReads;
-}
-
function getDiagnosticData(node) {
let db = node.getDB('admin');
- const stats = verifyGetDiagnosticData(db).serverStatus;
+ const stats = verifyGetDiagnosticData(db, false /* logData */).serverStatus;
assert(stats.hasOwnProperty('mirroredReads'));
+ jsTestLog(`Got diagnostic data for host: ${node}, ${tojson(stats.mirroredReads)}`);
return stats.mirroredReads;
}
function getMirroredReadsProcessedAsSecondary() {
let readsProcessed = 0;
for (let i = 0; i < secondaries.length; i++) {
- const stats = getMirroredReadsStats(secondaries[i]);
+ const stats = getDiagnosticData(secondaries[i]);
readsProcessed += stats.processedAsSecondary;
}
return readsProcessed;
@@ -46,7 +50,7 @@ function waitForPrimaryToSendMirroredReads(expectedReadsSeen, expectedReadsSent)
jsTestLog("Verifying reads were seen and sent by the maestro");
jsTestLog("ExpectedReadsSent :" + expectedReadsSent +
", ExpectedReadsSeen:" + expectedReadsSeen);
- const afterPrimaryReadStats = getMirroredReadsStats(primary);
+ const afterPrimaryReadStats = getDiagnosticData(primary);
const actualMirrorableReadsSeen = afterPrimaryReadStats.seen;
const actualMirroredReadsSent = afterPrimaryReadStats.sent;
jsTestLog("Primary metrics after reads: " + tojson(afterPrimaryReadStats));
@@ -58,13 +62,14 @@ function waitForPrimaryToSendMirroredReads(expectedReadsSeen, expectedReadsSent)
function sendAndCheckReads(rst) {
const primary = rst.getPrimary();
// Initial metrics before sending kOperations number of finds.
- const initialPrimaryReadStats = getMirroredReadsStats(primary);
+ const initialPrimaryReadStats = getDiagnosticData(primary);
const mirrorableReadsSeenBefore = initialPrimaryReadStats.seen;
const mirroredReadsSentBefore = initialPrimaryReadStats.sent;
+    assert.commandWorked(primary.getDB(kDbName).getCollection(kCollName).insert({x: 0}));
jsTestLog(`Sending ${kOperations} reads to primary`);
for (var i = 0; i < kOperations; ++i) {
- primary.getDB(kDbName).runCommand({find: kCollName, filter: {}});
+ assert.commandWorked(primary.getDB(kDbName).runCommand({find: kCollName, filter: {}}));
}
const expectedReadsSeen = mirrorableReadsSeenBefore + kOperations;
@@ -121,8 +126,8 @@ assert.commandWorked(primary.adminCommand({setParameter: 1, mirrorReads: {sampli
let primaryResolvedAfterReads = getDiagnosticData(primary).resolved;
jsTestLog(`Mirrored ${primaryResolvedAfterReads} reads so far`);
for (let i = 0; i < secondaries.length; i++) {
- jsTestLog("Secondary " + secondaries[i] +
- " metrics: " + tojson(getMirroredReadsStats(secondaries[i])));
+ // Print the secondary metrics for easier debugging.
+ getDiagnosticData(secondaries[i]);
}
// There are two secondaries, so `kOperations * 2` reads must be resolved.
return primaryResolvedBeforeReads + kOperations * 2 <= primaryResolvedAfterReads;
diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js
index 06dcf86c819f0..37051468a5eac 100644
--- a/jstests/noPassthrough/geo_near_random1.js
+++ b/jstests/noPassthrough/geo_near_random1.js
@@ -1,14 +1,12 @@
// this tests all points using $near
-var db;
(function() {
"use strict";
load("jstests/libs/geo_near_random.js");
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod failed to start.");
-db = conn.getDB("test");
-var test = new GeoNearRandomTest("weekly.geo_near_random1");
+var test = new GeoNearRandomTest("weekly.geo_near_random1", conn.getDB("test"));
test.insertPts(1000);
diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js
index b5ec59af1124d..6ee97cae62424 100644
--- a/jstests/noPassthrough/geo_near_random2.js
+++ b/jstests/noPassthrough/geo_near_random2.js
@@ -1,14 +1,12 @@
// this tests 1% of all points using $near and $nearSphere
-var db;
(function() {
"use strict";
load("jstests/libs/geo_near_random.js");
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod failed to start.");
-db = conn.getDB("test");
-var test = new GeoNearRandomTest("weekly.geo_near_random2");
+var test = new GeoNearRandomTest("weekly.geo_near_random2", conn.getDB("test"));
test.insertPts(50000);
diff --git a/jstests/noPassthrough/global_profiling_filter.js b/jstests/noPassthrough/global_profiling_filter.js
index ac3b4496550de..a6356095ab8c0 100644
--- a/jstests/noPassthrough/global_profiling_filter.js
+++ b/jstests/noPassthrough/global_profiling_filter.js
@@ -238,7 +238,7 @@ function runCorrectnessTests(conn) {
})();
(function testGlobalFilterUnsetOverridesDatabaseSpecificSettings() {
- result = assert.commandWorked(db.getSiblingDB("db1").runCommand(
+ let result = assert.commandWorked(db.getSiblingDB("db1").runCommand(
{profile: isMongos ? 0 : 1, filter: profileFilter1.filter}));
assert.eq(result.filter, profileFilter2.filter);
result = assert.commandWorked(db.getSiblingDB("db3").runCommand(
diff --git a/jstests/noPassthrough/group_spill_long_keys.js b/jstests/noPassthrough/group_spill_long_keys.js
new file mode 100644
index 0000000000000..08a6abb9547ee
--- /dev/null
+++ b/jstests/noPassthrough/group_spill_long_keys.js
@@ -0,0 +1,110 @@
+/**
+ * Test a $group query which has a large number of group-by fields and needs to spill to disk.
+ */
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
+
+const MEM_LIMIT_KB = 2;
+
+// Make sure that we can handle more than 32 keys (the maximum allowed number of components in a
+// compound index).
+const NUM_GROUP_KEYS = 33;
+
+// Run a mongod that has a reduced memory limit for when its hash aggregation operators (in both
+// SBE and the Classic execution engine) will spill data to disk.
+const memLimit = MEM_LIMIT_KB * 1024;
+const conn = MongoRunner.runMongod({
+ setParameter: {
+ internalQuerySlotBasedExecutionHashAggApproxMemoryUseInBytesBeforeSpill: memLimit,
+ internalDocumentSourceGroupMaxMemoryBytes: memLimit
+ }
+});
+assert.neq(conn, null, "mongod failed to start up");
+
+const db = conn.getDB("test");
+const coll = db.group_spill_long_keys;
+
+function nextFieldName(name) {
+ function nextChar(char) {
+ return String.fromCharCode(char.charCodeAt(0) + 1);
+ }
+
+ function lastChar(str) {
+ return str[str.length - 1];
+ }
+
+ // If the final character is a "z", start using a longer string. Otherwise we cycle through all
+    // possibilities for the last letter. This means we generate only 26 unique names for each
+ // string length, but that's ok since this function will not be used to generate more than ~40
+ // unique names.
+ if (lastChar(name) === "z") {
+ return "a".repeat(name.length + 1);
+ } else {
+ return name.substr(0, name.length - 1) + nextChar(lastChar(name));
+ }
+}
+
+let counter = 0;
+
+/**
+ * Generates a document with 'NUM_GROUP_KEYS' uniquely named keys. Values are increasingly large
+ * 64-bit integers.
+ */
+function generateDoc() {
+ let doc = {};
+ let str = "a";
+ for (let i = 0; i < NUM_GROUP_KEYS; ++i) {
+ doc[str] = NumberLong(counter);
+ ++counter;
+ str = nextFieldName(str);
+ }
+ return doc;
+}
+
+// Calculate how many documents we need. We use 100 times the approximate number of documents that
+// would hit the spill limit, in order to cause the query to spill frequently.
+let exampleDoc = generateDoc();
+let docSize = Object.bsonsize(exampleDoc);
+let docsNeeded = Math.ceil(memLimit / docSize) * 100;
+
+coll.drop();
+for (let i = 0; i < docsNeeded; ++i) {
+ assert.commandWorked(coll.insert(generateDoc()));
+}
+
+/**
+ * Generates the _id field for a $group query that aggregates on 'NUM_GROUP_KEYS' unique keys. The
+ * returned document should look like {a: "$a", b: "$b", ...}.
+ */
+const groupKey = (function() {
+ let doc = {};
+ let str = "a";
+ for (let i = 0; i < NUM_GROUP_KEYS; ++i) {
+ doc[str] = "$" + str;
+ str = nextFieldName(str);
+ }
+ return doc;
+}());
+
+const pipeline = [{$group: {_id: groupKey}}];
+
+// Run the query twice and assert that there are as many groups as documents in the collection,
+// since each document has a unique group key. We run the query twice because the second time it may
+// use a cached plan.
+for (let i = 0; i < 2; ++i) {
+ assert.eq(docsNeeded, coll.aggregate(pipeline).itcount());
+}
+
+// Run an explain. If SBE was used, make sure that we see a "group" stage that spilled in the exec
+// stats.
+let explain = coll.explain("executionStats").aggregate(pipeline);
+assert(explain.hasOwnProperty("explainVersion"), explain);
+if (explain.explainVersion !== "1") {
+ let hashAgg = getPlanStage(explain.executionStats.executionStages, "group");
+ // There should be a group-by slot for each field we are grouping by.
+ assert.eq(hashAgg.groupBySlots.length, NUM_GROUP_KEYS, hashAgg);
+ assert.eq(hashAgg.usedDisk, true, hashAgg);
+ assert.gt(hashAgg.spills, 0, hashAgg);
+ assert.gt(hashAgg.spilledRecords, 0, hashAgg);
+}
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
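
For reference, nextFieldName() in the new test above advances the last character through the alphabet and, once it reaches "z", switches to a longer all-"a" name; starting from "a", the 33 generated group-by keys are a, b, ..., z followed by aa, ab, ..., ag. A small standalone sketch that reproduces the sequence (duplicated here only for illustration):

    function nextName(name) {
        const bump = (c) => String.fromCharCode(c.charCodeAt(0) + 1);
        const last = name[name.length - 1];
        return last === "z" ? "a".repeat(name.length + 1)
                            : name.substring(0, name.length - 1) + bump(last);
    }

    let names = ["a"];
    while (names.length < 33) {
        names.push(nextName(names[names.length - 1]));
    }
    print(names.join(", "));  // a, b, ..., z, aa, ab, ..., ag
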
diff --git a/jstests/noPassthrough/group_spill_metrics.js b/jstests/noPassthrough/group_spill_metrics.js
index 89c6d6072d93f..4101cdbba62e1 100644
--- a/jstests/noPassthrough/group_spill_metrics.js
+++ b/jstests/noPassthrough/group_spill_metrics.js
@@ -7,11 +7,8 @@
* requires_persistence,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB('test');
@@ -74,4 +71,3 @@ assert.eq(
metricsAfter.spilledRecords, expectedSpilledRecords + metricsBefore.spilledRecords, pipeline);
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/hybrid_index_with_updates.js b/jstests/noPassthrough/hybrid_index_with_updates.js
index 0e4641ed83a40..a7e1d1a5b440d 100644
--- a/jstests/noPassthrough/hybrid_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_index_with_updates.js
@@ -51,7 +51,7 @@ const collScanFailPoint = configureFailPoint(
// Start the background build.
let bgBuild = startParallelShell(function() {
- assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true}));
+ assert.commandWorked(db.hybrid.createIndex({i: 1}));
}, conn.port);
checkLog.containsJson(conn, 20386, {
diff --git a/jstests/noPassthrough/hybrid_multikey.js b/jstests/noPassthrough/hybrid_multikey.js
index ba0154708f4b9..6908a63c36cc3 100644
--- a/jstests/noPassthrough/hybrid_multikey.js
+++ b/jstests/noPassthrough/hybrid_multikey.js
@@ -3,10 +3,7 @@
* various index types.
*/
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/analyze_plan.js"); // For getWinningPlan to analyze explain() output.
-
-(function() {
-"use strict";
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
const dbName = 'test';
@@ -135,4 +132,3 @@ runTest({
});
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/hybrid_unique_index_with_updates.js b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
index 3dae16551c743..2140dffd9f699 100644
--- a/jstests/noPassthrough/hybrid_unique_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
@@ -51,7 +51,7 @@ let setUp = function(coll) {
let buildIndexInBackground = function(coll, expectDuplicateKeyError) {
const createIndexFunction = function(collFullName) {
const coll = db.getMongo().getCollection(collFullName);
- return coll.createIndex({i: 1}, {background: true, unique: true});
+ return coll.createIndex({i: 1}, {unique: true});
};
const assertFunction = expectDuplicateKeyError ? function(collFullName) {
assert.commandFailedWithCode(createIndexFunction(collFullName), ErrorCodes.DuplicateKey);
diff --git a/jstests/noPassthrough/index_abort_before_commit_signal.js b/jstests/noPassthrough/index_abort_before_commit_signal.js
index 61e2a7ee7a040..927bb3bce18a9 100644
--- a/jstests/noPassthrough/index_abort_before_commit_signal.js
+++ b/jstests/noPassthrough/index_abort_before_commit_signal.js
@@ -60,7 +60,7 @@ createIndex();
jsTestLog('Waiting for index build to complete');
IndexBuildTest.waitForIndexBuildToStop(testDB, coll.getName(), 'a_1');
-IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_abort_stepdown_prepare.js b/jstests/noPassthrough/index_abort_stepdown_prepare.js
index 061ac4055b355..c8a98d015ff9d 100644
--- a/jstests/noPassthrough/index_abort_stepdown_prepare.js
+++ b/jstests/noPassthrough/index_abort_stepdown_prepare.js
@@ -105,7 +105,7 @@ assert.commandWorked(session.abortTransaction_forTesting());
jsTestLog("Waiting for index build to complete");
IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), indexName);
-IndexBuildTest.assertIndexes(primaryColl, 2, ["_id_", indexName]);
+IndexBuildTest.assertIndexesSoon(primaryColl, 2, ["_id_", indexName]);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js b/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js
new file mode 100644
index 0000000000000..a5612f9960443
--- /dev/null
+++ b/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js
@@ -0,0 +1,133 @@
+/**
+ * Ensures that index builds are aborted when setFCV causes an FCV downgrade, and that during that
+ * period new index builds are blocked.
+ *
+ * TODO (SERVER-68290): remove test when removing index build abort on FCV downgrade and reintroduce
+ * "jstests/noPassthrough/index_downgrade_fcv.js".
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ },
+ },
+ ]
+});
+rst.startSet();
+rst.initiate();
+
+const dbName = 'test';
+const collName = 'coll';
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
+
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+rst.awaitReplication();
+
+// Clear log to ensure checkLog does not see unrelated log entries.
+assert.commandWorked(primaryDB.adminCommand({clearLog: 'global'}));
+
+// Hang an index build in the commit phase, to later check that FCV downgrade waits on a committing
+// index build.
+const hangIndexBuildBeforeCommit = configureFailPoint(primary, "hangIndexBuildBeforeCommit");
+const createIdxCommit = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {c: 1}, null, [ErrorCodes.IndexBuildAborted]);
+const commitBuildUUID =
+ IndexBuildTest
+ .assertIndexesSoon(primaryColl, 2, ['_id_'], ['c_1'], {includeBuildUUIDs: true})['c_1']
+ .buildUUID;
+hangIndexBuildBeforeCommit.wait();
+
+// Set up an index build to be aborted by the FCV downgrade.
+const hangAfterInitializingIndexBuild =
+ configureFailPoint(primary, "hangAfterInitializingIndexBuild");
+const createIdxAborted = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]);
+
+const abortedBuildUUID =
+ IndexBuildTest
+ .assertIndexesSoon(
+ primaryColl, 3, ['_id_'], ['a_1', 'c_1'], {includeBuildUUIDs: true})['a_1']
+ .buildUUID;
+
+hangAfterInitializingIndexBuild.wait();
+
+const hangAfterBlockingIndexBuildsForFcvDowngrade =
+ configureFailPoint(primary, "hangAfterBlockingIndexBuildsForFcvDowngrade");
+
+// Ensure the index build block and abort happen during the FCV transitioning state.
+const failAfterReachingTransitioningState =
+ configureFailPoint(primary, "failAfterReachingTransitioningState");
+
+const awaitSetFcv = startParallelShell(
+ funWithArgs(function(collName) {
+ // Should fail due to failAfterReachingTransitioningState.
+ assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
+ 7555200);
+ }, primaryColl.getName()), primary.port);
+
+hangAfterBlockingIndexBuildsForFcvDowngrade.wait();
+
+// Start an index build while the block is active.
+const createIdxBlocked = IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {b: 1});
+// "Index build: new index builds are blocked, waiting".
+checkLog.containsJson(primary, 7738700);
+
+hangAfterBlockingIndexBuildsForFcvDowngrade.off();
+
+// "About to abort all index builders running".
+assert.soon(() => checkLog.checkContainsWithCountJson(primary,
+ 7738702,
+ {
+ reason: function(reason) {
+ return reason.startsWith(
+ "FCV downgrade in progress");
+ }
+ },
+ /*count=*/ 1));
+
+// "Index build: joined after abort".
+checkLog.containsJson(primary, 20655, {
+ buildUUID: function(uuid) {
+ return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(abortedBuildUUID);
+ }
+});
+
+checkLog.containsJson(primary, 4725201, {
+ indexBuilds: function(uuidArray) {
+ return uuidArray && uuidArray.length == 1 &&
+ uuidArray[0]["uuid"]["$uuid"] === extractUUIDFromObject(commitBuildUUID);
+ }
+});
+hangIndexBuildBeforeCommit.off();
+hangAfterInitializingIndexBuild.off();
+
+jsTestLog("Waiting for threads to join");
+createIdxAborted();
+createIdxCommit();
+awaitSetFcv();
+createIdxBlocked();
+
+// The index build started before the FCV downgrade should have been aborted, whereas the build
+// started while the index build block was in place should have succeeded. The index build which was
+// already in the commit phase when the FCV downgrade took place should also have completed.
+IndexBuildTest.assertIndexesSoon(primaryColl, 3, ['_id_', 'b_1', 'c_1']);
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort.js b/jstests/noPassthrough/index_build_external_and_internal_abort.js
index b3b3e671abd9c..447ad5dce26a7 100644
--- a/jstests/noPassthrough/index_build_external_and_internal_abort.js
+++ b/jstests/noPassthrough/index_build_external_and_internal_abort.js
@@ -3,14 +3,10 @@
* internal index build abort (e.g. build failed due to invalid keys).
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
load('jstests/noPassthrough/libs/index_build.js');
const rst = new ReplSetTest({
@@ -29,7 +25,6 @@ assert.commandWorked(coll.insert({point: {x: -15.0, y: "abc"}}));
let indexBuilderThreadFP =
configureFailPoint(testDB, 'hangIndexBuildBeforeTransitioningReplStateTokAwaitPrimaryAbort');
-let connThreadFP = configureFailPoint(testDB, 'hangInRemoveIndexBuildEntryAfterCommitOrAbort');
// Will fail with error code 13026: "geo values must be 'legacy coordinate pairs' for 2d indexes"
const waitForIndexBuild =
@@ -45,20 +40,15 @@ const awaitDropCollection =
assert.commandWorked(db.runCommand({drop: collName}));
}, coll.getName()), primary.port);
-// Wait for the 'drop' command to hang while tearing down the index build, just after setting the
-// index build state to kAborted.
-connThreadFP.wait();
+// Check that the external abort is reattempted multiple times, meaning it is blocked behind the
+// internal abort.
+assert.soon(() => checkLog.checkContainsWithAtLeastCountJson(primary, 4656010, {}, 3));
-// Resume the index builder thread, which would now try to abort an index that's already in kAbort
-// state.
+// Resume the index builder thread, which will transition to kAwaitPrimaryAbort and unblock external
+// aborts.
indexBuilderThreadFP.off();
-// Wait for the log to confirm the index builder won't attempt to abort the build, because it's
-// already in aborted state.
-checkLog.containsJson(primary, 7530800);
-
-// Resume the collection drop and wait for its completion.
-connThreadFP.off();
+// Wait for completion.
awaitDropCollection();
waitForIndexBuild();
@@ -66,5 +56,4 @@ waitForIndexBuild();
// The collection does not exist.
assert.eq(testDB.getCollectionNames().indexOf(coll.getName()), -1, "collection still exists.");
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js
new file mode 100644
index 0000000000000..d9a37f17dd630
--- /dev/null
+++ b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js
@@ -0,0 +1,47 @@
+/**
+ * Tests that dropping a collection (causing an external index build abort) does not deadlock with an
+ * internal self abort for single-phase index builds.
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+// A standalone configuration is key to running the index build single-phase.
+const conn = MongoRunner.runMongod();
+
+const dbName = 'test';
+const collName = 'coll';
+const db = conn.getDB(dbName);
+const coll = db.getCollection(collName);
+
+coll.drop();
+assert.commandWorked(coll.insert({a: [0, "a"]}));
+
+// Hang after the index build has checked if the build is already aborted, but before taking
+// collection locks for cleanup.
+const hangBeforeCleanup = configureFailPoint(db, 'hangIndexBuildBeforeAbortCleanUp');
+
+const hangAfterCollDropHasLocks =
+ configureFailPoint(db, 'hangAbortIndexBuildByBuildUUIDAfterLocks');
+
+const createIdx =
+ IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: "2d"}, null, [13026]);
+
+hangBeforeCleanup.wait();
+
+const collDrop = startParallelShell(funWithArgs(function(dbName, collName) {
+ db.getSiblingDB(dbName).getCollection(collName).drop();
+ }, dbName, collName), conn.port);
+
+hangAfterCollDropHasLocks.wait();
+hangBeforeCleanup.off();
+hangAfterCollDropHasLocks.off();
+
+jsTestLog("Waiting for collection drop shell to return");
+collDrop();
+createIdx();
+
+MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js
new file mode 100644
index 0000000000000..bf1e120ea0921
--- /dev/null
+++ b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js
@@ -0,0 +1,90 @@
+/**
+ * Tests dropping a collection (causing an external index build abort) does not deadlock with an
+ * internal self abort for two-phase index builds.
+ *
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ },
+ },
+ ]
+});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB('test');
+const primaryColl = primaryDB.getCollection('test');
+
+primaryColl.drop();
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+// Pause the index build on the primary using the 'hangAfterInitializingIndexBuild' failpoint.
+const failpointHangAfterInit = configureFailPoint(primaryDB, "hangAfterInitializingIndexBuild");
+const hangBeforeCleanup = configureFailPoint(primaryDB, 'hangIndexBuildBeforeAbortCleanUp');
+
+// Block the secondary to avoid commitQuorum being fulfilled.
+IndexBuildTest.pauseIndexBuilds(rst.getSecondary());
+
+jsTestLog("Waiting for index build to start");
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.OutOfDiskSpace]);
+
+const buildUUID =
+ IndexBuildTest
+ .assertIndexesSoon(primaryColl, 2, ['_id_'], ['a_1'], {includeBuildUUIDs: true})['a_1']
+ .buildUUID;
+
+const failAfterVoteForCommitReadiness =
+ configureFailPoint(primaryDB,
+ "failIndexBuildWithErrorInSecondDrain",
+ {buildUUID: buildUUID, error: ErrorCodes.OutOfDiskSpace});
+
+// Continue index build after preparing the artificial failure.
+failpointHangAfterInit.off();
+
+// Wait for the index build to be in clean up path.
+hangBeforeCleanup.wait();
+
+const hangAfterCollDropHasLocks =
+ configureFailPoint(primaryDB, 'hangAbortIndexBuildByBuildUUIDAfterLocks');
+
+const collDrop = startParallelShell(funWithArgs(function(dbName, collName) {
+ jsTestLog("Dropping collection");
+ db.getSiblingDB(dbName).getCollection(collName).drop();
+ }, primaryDB.getName(), primaryColl.getName()), primary.port);
+
+hangAfterCollDropHasLocks.wait();
+hangBeforeCleanup.off();
+hangAfterCollDropHasLocks.off();
+
+// The index build should not be externally abortable once the index builder thread is in the
+// process of aborting.
+jsTestLog("Waiting for the index build to abort");
+// Cleaned up index build after abort.
+checkLog.containsJson(primary, 465611, {
+ buildUUID: function(uuid) {
+ return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(buildUUID);
+ }
+});
+
+jsTestLog("Waiting for collection drop shell to return");
+collDrop();
+createIdx();
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_killed_disk_space.js b/jstests/noPassthrough/index_build_killed_disk_space.js
index c1c76ecbdcfae..69ddcb7315545 100644
--- a/jstests/noPassthrough/index_build_killed_disk_space.js
+++ b/jstests/noPassthrough/index_build_killed_disk_space.js
@@ -1,9 +1,9 @@
/**
* Ensures that index builds are killed on primaries when the available disk space drops below a
- * limit.
+ * limit, but only if the primary has not yet voted for commit.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -13,6 +13,114 @@
load('jstests/noPassthrough/libs/index_build.js');
load("jstests/libs/fail_point_util.js");
+function killBeforeVoteCommitSucceeds(rst) {
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB('test');
+ const primaryColl = primaryDB.getCollection('test');
+
+ primaryColl.drop();
+ assert.commandWorked(primaryColl.insert({a: 1}));
+
+ const hangAfterInitFailPoint = configureFailPoint(primaryDB, 'hangAfterInitializingIndexBuild');
+
+ let serverStatus = primaryDB.serverStatus();
+ const tookActionCountBefore = serverStatus.metrics.diskSpaceMonitor.tookAction;
+ const killedDueToInsufficientDiskSpaceBefore =
+ serverStatus.indexBuilds.killedDueToInsufficientDiskSpace;
+
+ jsTestLog("Waiting for index build to start");
+ const createIdx = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.OutOfDiskSpace]);
+ IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1');
+
+ // Ensure the index build is in an abortable state before the DiskSpaceMonitor runs.
+ hangAfterInitFailPoint.wait();
+
+ // Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
+ // Simulate a remaining disk space of 450MB.
+ const simulateDiskSpaceFp =
+ configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+ jsTestLog("Waiting for the disk space monitor to take action");
+ assert.soon(() => {
+ return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore;
+ });
+
+ jsTestLog("Waiting for the index build to be killed");
+ // "Index build: joined after abort".
+ checkLog.containsJson(primary, 20655);
+
+ jsTestLog("Waiting for threads to join");
+ createIdx();
+ simulateDiskSpaceFp.off();
+ hangAfterInitFailPoint.off();
+
+ // "Index build: aborted due to insufficient disk space"
+ checkLog.containsJson(primary, 7333601);
+
+ assert.eq(killedDueToInsufficientDiskSpaceBefore + 1,
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+
+ rst.awaitReplication();
+ IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
+
+ const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName());
+ IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+}
+
+function killAfterVoteCommitFails(rst) {
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB('test');
+ const primaryColl = primaryDB.getCollection('test');
+
+ primaryColl.drop();
+ assert.commandWorked(primaryColl.insert({a: 1}));
+
+ const hangAfterVoteCommit =
+ configureFailPoint(primaryDB, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness');
+
+ let serverStatus = primaryDB.serverStatus();
+ const tookActionCountBefore = serverStatus.metrics.diskSpaceMonitor.tookAction;
+ const killedDueToInsufficientDiskSpaceBefore =
+ serverStatus.indexBuilds.killedDueToInsufficientDiskSpace;
+
+ jsTestLog("Waiting for index build to start");
+ const createIdx = IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {a: 1});
+ IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1');
+
+    // Ensure the index build has voted for commit before the DiskSpaceMonitor runs.
+ hangAfterVoteCommit.wait();
+
+ // Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
+ // Simulate a remaining disk space of 450MB.
+ const simulateDiskSpaceFp =
+ configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+ jsTestLog("Waiting for the disk space monitor to take action");
+ assert.soon(() => {
+ return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore;
+ });
+
+ jsTestLog("Waiting for the index build kill attempt to fail");
+ // "Index build: cannot force abort".
+ checkLog.containsJson(primary, 7617000);
+
+ hangAfterVoteCommit.off();
+ simulateDiskSpaceFp.off();
+
+ jsTestLog("Waiting for threads to join");
+ createIdx();
+
+ assert.eq(killedDueToInsufficientDiskSpaceBefore,
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+
+ rst.awaitReplication();
+ IndexBuildTest.assertIndexes(primaryColl, 2, ['_id_', 'a_1']);
+
+ const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName());
+ IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+}
+
const rst = new ReplSetTest({
nodes: [
{},
@@ -27,50 +135,8 @@ const rst = new ReplSetTest({
rst.startSet();
rst.initiate();
-const primary = rst.getPrimary();
-const primaryDB = primary.getDB('test');
-const primaryColl = primaryDB.getCollection('test');
-
-assert.commandWorked(primaryColl.insert({a: 1}));
-
-let hangAfterInitFailPoint = configureFailPoint(primaryDB, 'hangAfterInitializingIndexBuild');
-
-const tookActionCountBefore = primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction;
-
-jsTestLog("Waiting for index build to start");
-const createIdx = IndexBuildTest.startIndexBuild(
- primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.Interrupted]);
-IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1');
-
-// Ensure the index build is in an abortable state before the DiskSpaceMonitor runs.
-hangAfterInitFailPoint.wait();
-
-// Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
-// Simulate a remaining disk space of 450MB.
-const simulateDiskSpaceFp =
- configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
-
-jsTestLog("Waiting for the disk space monitor to take action");
-assert.soon(() => {
- return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore;
-});
-hangAfterInitFailPoint.off();
-
-jsTestLog("Waiting for the index build to be killed");
-// "Index build: joined after abort".
-checkLog.containsJson(primary, 20655);
-
-jsTestLog("Waiting for threads to join");
-createIdx();
-simulateDiskSpaceFp.off();
-
-assert.eq(1, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
-
-rst.awaitReplication();
-IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
-
-const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName());
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+killBeforeVoteCommitSucceeds(rst);
+killAfterVoteCommitFails(rst);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_build_killed_disk_space_secondary.js b/jstests/noPassthrough/index_build_killed_disk_space_secondary.js
index c083df03f27f0..6ece302e479dd 100644
--- a/jstests/noPassthrough/index_build_killed_disk_space_secondary.js
+++ b/jstests/noPassthrough/index_build_killed_disk_space_secondary.js
@@ -1,9 +1,9 @@
/**
* Ensures that index builds are cancelled by secondaries when the available disk space drops below
- * a limit.
+ * a limit, only if the secondary has not yet voted for commit.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -13,6 +13,167 @@
load('jstests/noPassthrough/libs/index_build.js');
load("jstests/libs/fail_point_util.js");
+function killBeforeVoteCommitSucceeds(rst) {
+ jsTestLog(
+ "Index build in a secondary can be killed by the DiskSpaceMonitor before it has voted for commit.");
+
+ const dbName = 'test';
+ const collName = 'coll';
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB(dbName);
+ const primaryColl = primaryDB.getCollection(collName);
+
+ primaryColl.drop();
+ assert.commandWorked(primaryColl.insert({a: 1}));
+
+ rst.awaitReplication();
+
+ const secondary = rst.getSecondary();
+ const secondaryDB = secondary.getDB(dbName);
+ const secondaryColl = secondaryDB.getCollection(collName);
+
+ const primaryKilledDueToDiskSpaceBefore =
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+ const secondaryKilledDueToDiskSpaceBefore =
+ secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+
+ // Pause the index build on the primary after it replicates the startIndexBuild oplog entry,
+ // effectively pausing the index build on the secondary too as it will wait for the primary to
+ // commit or abort.
+ IndexBuildTest.pauseIndexBuilds(primary);
+
+ const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction;
+
+ jsTestLog("Waiting for index build to start on secondary");
+ const hangAfterInitFailPoint =
+ configureFailPoint(secondaryDB, 'hangAfterInitializingIndexBuild');
+ const createIdx = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]);
+ IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1');
+
+ // Ensure the index build is in an abortable state before the DiskSpaceMonitor runs.
+ hangAfterInitFailPoint.wait();
+
+ // Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
+ // Simulate a remaining disk space of 450MB on the secondary node.
+ const simulateDiskSpaceFp =
+ configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+ jsTestLog("Waiting for the disk space monitor to take action on secondary");
+ assert.soon(() => {
+ return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction >
+ tookActionCountBefore;
+ });
+ IndexBuildTest.resumeIndexBuilds(primary);
+
+ jsTestLog("Waiting for the index build to be killed");
+ // "Index build: joined after abort".
+ checkLog.containsJson(secondary, 20655);
+
+ jsTestLog("Waiting for threads to join");
+ createIdx();
+
+ // Confirm that the error message returned by the createIndexes command describes the secondary
+ // running out of disk space, rather than a generic "operation was interrupted" message.
+ // We use the log message as a proxy for the error message that is returned by createIndexes.
+ checkLog.contains(
+ primary,
+ new RegExp(
+ "20655.*Index build: joined after abort.*IndexBuildAborted.*'voteAbortIndexBuild' received from.*: available disk space of.*bytes is less than required minimum of"));
+
+ simulateDiskSpaceFp.off();
+
+ // "Index build: aborted due to insufficient disk space"
+ checkLog.containsJson(secondaryDB, 7333601);
+
+ // Disable failpoint only after we know the build is aborted. We want the build to be aborted
+ // before it has voted for commit, and this ensures that is the case.
+ hangAfterInitFailPoint.off();
+
+ assert.eq(primaryKilledDueToDiskSpaceBefore,
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+ assert.eq(secondaryKilledDueToDiskSpaceBefore + 1,
+ secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+
+ rst.awaitReplication();
+ IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
+ IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+}
+
+function killAfterVoteCommitFails(rst) {
+ jsTestLog(
+        "Index build in a secondary cannot be killed by the DiskSpaceMonitor after it has voted for commit.");
+
+ const dbName = 'test';
+ const collName = 'coll';
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB(dbName);
+ const primaryColl = primaryDB.getCollection(collName);
+
+ primaryColl.drop();
+ assert.commandWorked(primaryColl.insert({a: 1}));
+
+ rst.awaitReplication();
+
+ const secondary = rst.getSecondary();
+ const secondaryDB = secondary.getDB(dbName);
+ const secondaryColl = secondaryDB.getCollection(collName);
+
+ const primaryKilledDueToDiskSpaceBefore =
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+ const secondaryKilledDueToDiskSpaceBefore =
+ secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+
+ // Pause the index build on the primary after it replicates the startIndexBuild oplog entry,
+ // effectively pausing the index build on the secondary too as it will wait for the primary to
+ // commit or abort.
+ IndexBuildTest.pauseIndexBuilds(primary);
+
+ const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction;
+
+ jsTestLog("Waiting for index build to start on secondary");
+ const hangAfterVoteCommit =
+ configureFailPoint(secondaryDB, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness');
+ const createIdx =
+ IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {a: 1}, null);
+ IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1');
+
+    // Ensure the index build has voted for commit before the DiskSpaceMonitor runs.
+ hangAfterVoteCommit.wait();
+
+ // Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
+ // Simulate a remaining disk space of 450MB on the secondary node.
+ const simulateDiskSpaceFp =
+ configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+ jsTestLog("Waiting for the disk space monitor to take action on secondary");
+ assert.soon(() => {
+ return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction >
+ tookActionCountBefore;
+ });
+ IndexBuildTest.resumeIndexBuilds(primary);
+
+ jsTestLog("Waiting for the index build kill attempt to fail");
+ // "Index build: cannot force abort".
+ checkLog.containsJson(secondary, 7617000);
+
+ // Disable failpoint only after the abort attempt.
+ hangAfterVoteCommit.off();
+
+ jsTestLog("Waiting for threads to join");
+ createIdx();
+ simulateDiskSpaceFp.off();
+
+ assert.eq(primaryKilledDueToDiskSpaceBefore,
+ primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+ assert.eq(secondaryKilledDueToDiskSpaceBefore,
+ secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+
+ rst.awaitReplication();
+ IndexBuildTest.assertIndexes(primaryColl, 2, ['_id_', 'a_1']);
+ IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+}
+
const rst = new ReplSetTest({
nodes: [
{},
@@ -27,62 +188,8 @@ const rst = new ReplSetTest({
rst.startSet();
rst.initiate();
-const dbName = 'test';
-const collName = 'coll';
-const primary = rst.getPrimary();
-const primaryDB = primary.getDB(dbName);
-const primaryColl = primaryDB.getCollection(collName);
-
-assert.commandWorked(primaryColl.insert({a: 1}));
-
-rst.awaitReplication();
-
-const secondary = rst.getSecondary();
-const secondaryDB = secondary.getDB(dbName);
-const secondaryColl = secondaryDB.getCollection(collName);
-
-// Pause the index build on the primary after it replicates the startIndexBuild oplog entry,
-// effectively pausing the index build on the secondary too as it will wait for the primary to
-// commit or abort.
-IndexBuildTest.pauseIndexBuilds(primary);
-
-const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction;
-
-jsTestLog("Waiting for index build to start on secondary");
-const hangAfterInitFailPoint = configureFailPoint(secondaryDB, 'hangAfterInitializingIndexBuild');
-const createIdx = IndexBuildTest.startIndexBuild(
- primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]);
-IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1');
-
-// Ensure the index build is in an abortable state before the DiskSpaceMonitor runs.
-hangAfterInitFailPoint.wait();
-hangAfterInitFailPoint.off();
-
-// Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
-// Simulate a remaining disk space of 450MB on the secondary node.
-const simulateDiskSpaceFp =
- configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
-
-jsTestLog("Waiting for the disk space monitor to take action on secondary");
-assert.soon(() => {
- return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore;
-});
-IndexBuildTest.resumeIndexBuilds(primary);
-
-jsTestLog("Waiting for the index build to be killed");
-// "Index build: joined after abort".
-checkLog.containsJson(secondary, 20655);
-
-jsTestLog("Waiting for threads to join");
-createIdx();
-simulateDiskSpaceFp.off();
-
-assert.eq(0, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
-assert.eq(1, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
-
-rst.awaitReplication();
-IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+killBeforeVoteCommitSucceeds(rst);
+killAfterVoteCommitFails(rst);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_build_killop_primary.js b/jstests/noPassthrough/index_build_killop_primary.js
new file mode 100644
index 0000000000000..d86fa92abaf43
--- /dev/null
+++ b/jstests/noPassthrough/index_build_killop_primary.js
@@ -0,0 +1,103 @@
+/**
+ * Confirms that background index builds on a primary can be aborted using killop.
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+function killopOnFailpoint(rst, failpointName, collName) {
+ const primary = rst.getPrimary();
+ const testDB = primary.getDB('test');
+ const coll = testDB.getCollection(collName);
+
+ assert.commandWorked(coll.insert({a: 1}));
+
+ const fp = configureFailPoint(testDB, failpointName);
+ // Pausing is only required to obtain the opId, as the target failpoint will block the build at
+ // the location where we want the index build to be killed.
+ IndexBuildTest.pauseIndexBuilds(primary);
+
+ const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
+
+ // When the index build starts, find its op id.
+ const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1');
+
+ IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
+ jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
+ assert.eq(
+ undefined,
+ op.connectionId,
+ 'Was expecting IndexBuildsCoordinator op; found db.currentOp() for connection thread instead: ' +
+ tojson(op));
+ assert.eq(
+ coll.getFullName(),
+ op.ns,
+ 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
+ });
+
+ // Once we have the opId, we can resume index builds (the target failpoint will block it at the
+ // desired location).
+ IndexBuildTest.resumeIndexBuilds(primary);
+
+ // Index build should be present in the config.system.indexBuilds collection.
+ const indexMap =
+ IndexBuildTest.assertIndexes(coll, 2, ["_id_"], ["a_1"], {includeBuildUUIDs: true});
+ const indexBuildUUID = indexMap['a_1'].buildUUID;
+ assert(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID}));
+
+ // Kill the index builder thread.
+ fp.wait();
+ assert.commandWorked(testDB.killOp(opId));
+ fp.off();
+
+ const exitCode = createIdx({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+
+ // Check that no new index has been created. This verifies that the index build was aborted
+ // rather than successfully completed.
+ IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']);
+
+ const cmdNs = testDB.getCollection('$cmd').getFullName();
+ let ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.startIndexBuild': coll.getName()});
+ assert.eq(1, ops.length, 'incorrect number of startIndexBuild oplog entries: ' + tojson(ops));
+ ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.abortIndexBuild': coll.getName()});
+ assert.eq(1, ops.length, 'incorrect number of abortIndexBuild oplog entries: ' + tojson(ops));
+ ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.commitIndexBuild': coll.getName()});
+ assert.eq(0, ops.length, 'incorrect number of commitIndexBuild oplog entries: ' + tojson(ops));
+
+ // Index build should be removed from the config.system.indexBuilds collection.
+ assert.isnull(
+ primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID}));
+}
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
+ },
+ },
+ ]
+});
+rst.startSet();
+rst.initiate();
+
+// Kill the build before it has voted for commit.
+jsTestLog("killOp index build on primary before vote for commit readiness");
+killopOnFailpoint(rst, 'hangAfterIndexBuildFirstDrain', 'beforeVoteCommit');
+
+// Kill the build after it has voted for commit.
+jsTestLog("killOp index build on primary after vote for commit readiness");
+killopOnFailpoint(rst, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness', 'afterVoteCommit');
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_killop_secondary_after_commit.js b/jstests/noPassthrough/index_build_killop_secondary_after_commit.js
new file mode 100644
index 0000000000000..fdc414861abdf
--- /dev/null
+++ b/jstests/noPassthrough/index_build_killop_secondary_after_commit.js
@@ -0,0 +1,108 @@
+/**
+ * Confirms that aborting a background index build on a secondary does not leave the node in an
+ * inconsistent state.
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+load("jstests/libs/log.js"); // for checkLog
+load('jstests/noPassthrough/libs/index_build.js');
+
+// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
+TestData.skipEnforceFastCountOnValidate = true;
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary. This allows the primary to commit without waiting
+ // for the secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
+ },
+ slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821.
+ },
+ {
+ // The arbiter prevents the primary from stepping down due to lack of majority in the
+ // case where the secondary is restarting due to the (expected) unclean shutdown. Note
+ // that the arbiter doesn't participate in the commitQuorum.
+ rsConfig: {
+ arbiterOnly: true,
+ },
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
+
+assert.commandWorked(coll.insert({a: 1}));
+
+let secondary = rst.getSecondary();
+IndexBuildTest.pauseIndexBuilds(secondary);
+
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
+
+// When the index build starts, find its op id.
+let secondaryDB = secondary.getDB(testDB.getName());
+const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+
+IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => {
+ jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
+ assert.eq(coll.getFullName(),
+ op.ns,
+ 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
+});
+
+// Wait for the primary to complete the index build and replicate a commit oplog entry.
+// "Index build: completed successfully"
+checkLog.containsJson(primary, 20663);
+
+// Kill the index build.
+assert.commandWorked(secondaryDB.killOp(opId));
+
+const gracefulIndexBuildFlag = FeatureFlagUtil.isEnabled(testDB, "IndexBuildGracefulErrorHandling");
+if (!gracefulIndexBuildFlag) {
+    // We expect this to crash the secondary because this error is not recoverable.
+ assert.soon(function() {
+ return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0;
+ });
+} else {
+ // Expect the secondary to crash. Depending on timing, this can be either because the secondary
+ // was waiting for a primary abort when a 'commitIndexBuild' is applied, or because the build
+ // fails and tries to request an abort while a 'commitIndexBuild' is being applied.
+ assert.soon(function() {
+ return rawMongoProgramOutput().search(/Fatal assertion.*(7329403|7329407)/) >= 0;
+ });
+}
+
+// After restarting the secondary, expect that the index build completes successfully.
+rst.stop(secondary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT});
+rst.start(secondary.nodeId, undefined, true /* restart */);
+
+secondary = rst.getSecondary();
+secondaryDB = secondary.getDB(testDB.getName());
+
+// Wait for the restarted secondary node to reach SECONDARY state again.
+rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+// Wait for the index build to complete on all nodes.
+rst.awaitReplication();
+
+// Expect successful createIndex command invocation in parallel shell. A new index should be present
+// on the primary and secondary.
+createIdx();
+
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+
+// Check that the index was created on the secondary despite the attempted killOp().
+const secondaryColl = secondaryDB.getCollection(coll.getName());
+IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/index_build_killop_secondary_before_commit.js b/jstests/noPassthrough/index_build_killop_secondary_before_commit.js
index acd23431959ce..d9b9231145c05 100644
--- a/jstests/noPassthrough/index_build_killop_secondary_before_commit.js
+++ b/jstests/noPassthrough/index_build_killop_secondary_before_commit.js
@@ -1,18 +1,100 @@
/**
- * Sends a killop to an index build on a secondary node before it commits and confirms that the
- * index build is canceled on all nodes.
+ * Sends a killop to an index build on a secondary node before it commits and confirms that:
+ * - the index build is canceled on all nodes if the killop is issued before voting for commit.
+ * - the killop results in the secondary crashing if it is issued after voting for commit.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
load('jstests/noPassthrough/libs/index_build.js');
+TestData.skipEnforceFastCountOnValidate = true;
+
+function killopIndexBuildOnSecondaryOnFailpoint(rst, failpointName, shouldSucceed) {
+ const primary = rst.getPrimary();
+ const testDB = primary.getDB('test');
+ const coll = testDB.getCollection('test');
+ let secondary = rst.getSecondary();
+ let secondaryDB = secondary.getDB(testDB.getName());
+
+ coll.drop();
+ assert.commandWorked(coll.insert({a: 1}));
+
+ // Pause the index build on the primary so that it does not commit.
+ IndexBuildTest.pauseIndexBuilds(primary);
+ IndexBuildTest.pauseIndexBuilds(secondary);
+
+ let expectedErrors = shouldSucceed ? ErrorCodes.IndexBuildAborted : [];
+
+ const fp = configureFailPoint(secondary, failpointName);
+ const createIdx =
+ IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {}, expectedErrors);
+
+ // When the index build starts, find its op id.
+ const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+
+ IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => {
+ jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
+ assert.eq(
+ coll.getFullName(),
+ op.ns,
+ 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
+ });
+    // Resume the index build until it reaches the desired failpoint, then kill it.
+ // Resume index build to the desired failpoint, and kill it.
+ IndexBuildTest.resumeIndexBuilds(secondary);
+ fp.wait();
+ assert.commandWorked(secondaryDB.killOp(opId));
+ fp.off();
+
+ if (shouldSucceed) {
+ // "attempting to abort index build".
+ checkLog.containsJson(primary, 4656010);
+
+ IndexBuildTest.resumeIndexBuilds(primary);
+ // "Index build: joined after abort".
+ checkLog.containsJson(primary, 20655);
+
+ // Wait for the index build abort to replicate.
+ rst.awaitReplication();
+
+ // Expect the index build to fail and for the index to not exist on either node.
+ createIdx();
+
+ IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+
+ const secondaryColl = secondaryDB.getCollection(coll.getName());
+ IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+ } else {
+ // We expect this to crash the secondary because this error is not recoverable.
+ assert.soon(function() {
+ return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0;
+ });
+
+ // After restarting the secondary, expect that the index build completes successfully.
+ rst.stop(secondary.nodeId,
+ undefined,
+ {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT});
+ rst.start(secondary.nodeId, undefined, true /* restart */);
+
+ secondary = rst.getSecondary();
+ secondaryDB = secondary.getDB(testDB.getName());
+
+ IndexBuildTest.resumeIndexBuilds(primary);
+ // Expect the index build to succeed.
+ createIdx();
+
+ // Wait for the index build commit to replicate.
+ rst.awaitReplication();
+ IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+
+ const secondaryColl = secondaryDB.getCollection(coll.getName());
+ IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+ }
+}
+
const rst = new ReplSetTest({
nodes: [
{},
@@ -23,59 +105,26 @@ const rst = new ReplSetTest({
},
slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821.
},
+ {
+ // The arbiter prevents the primary from stepping down due to lack of majority in the
+ // case where the secondary is restarting due to the (expected) unclean shutdown. Note
+ // that the arbiter doesn't participate in the commitQuorum.
+ rsConfig: {
+ arbiterOnly: true,
+ },
+ },
]
});
rst.startSet();
rst.initiate();
-const primary = rst.getPrimary();
-const testDB = primary.getDB('test');
-const coll = testDB.getCollection('test');
-
-assert.commandWorked(coll.insert({a: 1}));
-
-// Pause the index build on the primary so that it does not commit.
-IndexBuildTest.pauseIndexBuilds(primary);
-
-const secondary = rst.getSecondary();
-IndexBuildTest.pauseIndexBuilds(secondary);
-
-const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), {a: 1}, {}, ErrorCodes.IndexBuildAborted);
-
-// When the index build starts, find its op id.
-const secondaryDB = secondary.getDB(testDB.getName());
-const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
-
-IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => {
- jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
- assert.eq(coll.getFullName(),
- op.ns,
- 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
-});
-
-// Kill the index build.
-assert.commandWorked(secondaryDB.killOp(opId));
-
-// Resume index build, allowing it to cancel.
-IndexBuildTest.resumeIndexBuilds(secondary);
-// "attempting to abort index build".
-checkLog.containsJson(primary, 4656010);
-
-IndexBuildTest.resumeIndexBuilds(primary);
-// "Index build: joined after abort".
-checkLog.containsJson(primary, 20655);
-
-// Wait for the index build abort to replicate.
-rst.awaitReplication();
-
-// Expect the index build to fail and for the index to not exist on either node.
-createIdx();
-
-IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+// Kill the build before it has voted for commit.
+jsTestLog("killOp index build on secondary before vote for commit readiness");
+killopIndexBuildOnSecondaryOnFailpoint(
+ rst, 'hangAfterIndexBuildFirstDrain', /*shouldSucceed*/ true);
-const secondaryColl = secondaryDB.getCollection(coll.getName());
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+jsTestLog("killOp index build on secondary after vote for commit readiness");
+killopIndexBuildOnSecondaryOnFailpoint(
+ rst, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness', /*shouldSucceed*/ false);
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/index_build_operation_metrics.js b/jstests/noPassthrough/index_build_operation_metrics.js
index 75ee9e331fb45..49b1f580e1e3f 100644
--- a/jstests/noPassthrough/index_build_operation_metrics.js
+++ b/jstests/noPassthrough/index_build_operation_metrics.js
@@ -285,7 +285,7 @@ assert.commandWorked(primaryDB[collName].dropIndex({a: 1}));
assert(!metrics[dbName]);
});
- // Ensure the index was actually built. Do this after checking metrics because the helper calls
+ // Ensure the index was not built. Do this after checking metrics because the helper calls
// listIndexes which contributes to metrics.
IndexBuildTest.assertIndexes(primaryDB[collName], 1, ['_id_']);
IndexBuildTest.assertIndexes(secondaryDB[collName], 1, ['_id_']);
@@ -385,4 +385,4 @@ assert.commandWorked(primaryDB[collName].dropIndex({a: 1}));
IndexBuildTest.assertIndexes(secondaryDB[collName], 2, ['_id_', 'a_1']);
})();
rst.stopSet();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/index_build_out_of_order_scan.js b/jstests/noPassthrough/index_build_out_of_order_scan.js
new file mode 100644
index 0000000000000..ebc2d1dbf0017
--- /dev/null
+++ b/jstests/noPassthrough/index_build_out_of_order_scan.js
@@ -0,0 +1,76 @@
+/**
+ * Ensures that index builds encountering a DataCorruptionDetected error log and increment a metric.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const dbName = 'test';
+const collName = 'coll';
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
+
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+rst.awaitReplication();
+
+const hangAfterInitializingIndexBuild =
+ configureFailPoint(primary, "hangAfterInitializingIndexBuild");
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.DataCorruptionDetected]);
+
+const buildUUID =
+ IndexBuildTest
+ .assertIndexesSoon(primaryColl, 2, ['_id_'], ['a_1'], {includeBuildUUIDs: true})['a_1']
+ .buildUUID;
+
+hangAfterInitializingIndexBuild.wait();
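+// Force an out-of-order record during the collection scan, which triggers DataCorruptionDetected.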
+const WTRecordStoreUassertOutOfOrder =
+ configureFailPoint(primary, "WTRecordStoreUassertOutOfOrder");
+const hangBeforeAbort =
+ configureFailPoint(primary, "hangIndexBuildBeforeTransitioningReplStateTokAwaitPrimaryAbort");
+hangAfterInitializingIndexBuild.off();
+
+hangBeforeAbort.wait();
+
+// Get collection UUID.
+const collInfos = primaryDB.getCollectionInfos({name: primaryColl.getName()});
+assert.eq(collInfos.length, 1, collInfos);
+const collUUID = collInfos[0].info.uuid;
+
+// "Index build: data corruption detected".
+checkLog.containsJson(primary, 7333600, {
+ buildUUID: function(uuid) {
+ return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(buildUUID);
+ },
+ db: primaryDB.getName(),
+ collectionUUID: function(uuid) {
+ jsTestLog(collUUID);
+ return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(collUUID);
+ }
+});
+assert.eq(1, primaryDB.serverStatus().indexBuilds.failedDueToDataCorruption);
+
+// Disable the out-of-order failpoint so clean-up can succeed.
+WTRecordStoreUassertOutOfOrder.off();
+hangBeforeAbort.off();
+
+jsTestLog("Waiting for threads to join");
+createIdx();
+
+IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_']);
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js b/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js
new file mode 100644
index 0000000000000..778bfa89e0533
--- /dev/null
+++ b/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js
@@ -0,0 +1,79 @@
+/**
+ * Starts an index build, steps down the primary before the index build has completed its setup (and
+ * made other replicas aware of the index build), and drops the collection the index is being built
+ * on. This exercises a path described in SERVER-77025 whereby applying a DDL operation (like
+ * dropCollection) on the secondary conflicts with the ongoing index build. This test confirms that
+ * replication waits until the index build is not present anymore, and then retries dropCollection
+ * and succeeds.
+ *
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/fail_point_util.js"); // For "configureFailPoint()"
+load("jstests/libs/parallelTester.js"); // For "startParallelShell()"
+load("jstests/noPassthrough/libs/index_build.js"); // For "IndexBuildTest"
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB("test");
+const primaryColl = primaryDB.getCollection("coll");
+assert.commandWorked(primaryDB.setLogLevel(1, "replication"));
+
+assert.commandWorked(primaryColl.insert({_id: 1, a: 1}));
+rst.awaitReplication();
+
+// Enable a failpoint that makes the index build hang during setup, simulating a condition where the
+// index build is registered, but not yet replicated.
+const fp = configureFailPoint(primary, "hangIndexBuildOnSetupBeforeTakingLocks");
+
+const waitForIndexBuildToErrorOut = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, {}, [ErrorCodes.InterruptedDueToReplStateChange]);
+
+fp.wait();
+
+// Step down the node, while the index build is set up in memory but the "startIndexBuild" entry
+// hasn't replicated.
+assert.commandWorked(primaryDB.adminCommand({"replSetStepDown": 5 * 60, "force": true}));
+
+rst.waitForPrimary();
+
+// Drop the collection on the new primary. The new primary is not aware of the index build, because
+// the old primary hadn't been able to replicate the "startIndexBuild" oplog entry.
+const waitForDropCollection = startParallelShell(function() {
+ db.getCollection("coll").drop();
+}, rst.getPrimary().port);
+
+// Confirm that the old primary, now a secondary, waits until the index build is no longer in
+// progress before retrying the drop.
+// "Waiting for index build(s) to complete on the namespace before retrying the conflicting
+// operation"
+assert.soon(() => checkLog.checkContainsOnceJson(rst.getSecondary(), 7702500));
+
+// Resume the index build so it can fail due to InterruptedDueToReplStateChange.
+fp.off();
+
+// Confirm that the old primary, now a secondary, can retry the dropCollection.
+// "Acceptable error during oplog application: background operation in progress for namespace"
+assert.soon(() => checkLog.checkContainsOnceJson(rst.getSecondary(), 51775));
+
+// dropCollection now succeeds, and the command completes on the primary.
+waitForDropCollection();
+
+rst.awaitReplication();
+
+// The index build fails with InterruptedDueToReplStateChange.
+waitForIndexBuildToErrorOut();
+
+// The collection should not exist on either node.
+assert(!rst.getPrimary().getDB("test").getCollectionNames().includes("coll"));
+assert(!rst.getSecondary().getDB("test").getCollectionNames().includes("coll"));
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js b/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js
new file mode 100644
index 0000000000000..d5d76ca04bdbc
--- /dev/null
+++ b/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js
@@ -0,0 +1,80 @@
+/**
+ * Verifies that the index build step-up async task handles a stepdown gracefully.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = 'test';
+const collName = 'coll';
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
+
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+rst.awaitReplication();
+
+const secondary = rst.getSecondary();
+
+const hangAfterIndexBuildDumpsInsertsFromBulk =
+ configureFailPoint(primary, 'hangAfterIndexBuildDumpsInsertsFromBulk');
+const hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum =
+ configureFailPoint(secondary, 'hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum');
+
+const waitForIndexBuildToComplete = IndexBuildTest.startIndexBuild(
+ primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.InterruptedDueToReplStateChange]);
+
+// Wait for the primary to start the index build.
+hangAfterIndexBuildDumpsInsertsFromBulk.wait();
+
+assert.commandWorked(primary.adminCommand({replSetStepDown: 60, force: true}));
+
+// The old secondary is now stepping up and checking the active index builds.
+// "IndexBuildsCoordinator-StepUp [..] Active index builds"
+hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum.wait();
+checkLog.containsJson(secondary, 20650);
+
+// Step down the new primary.
+const waitForStepDown = startParallelShell(() => {
+ assert.commandWorked(db.adminCommand({replSetStepDown: 60 * 60, force: true}));
+}, secondary.port);
+
+// Wait for the RstlKillOpThread to run again. It first ran when the secondary stepped up (earlier
+// in this test case), and it's running now when it's stepping down again.
+assert.soon(() => checkLog.checkContainsWithCountJson(secondary, 21343, {}, 2));
+
+// Wait for the step-up task to be marked as killPending by the RstlKillOpThread.
+assert.soon(() => {
+ return 1 ===
+ secondary.getDB('test')
+ .currentOp({desc: 'IndexBuildsCoordinator-StepUp', killPending: true})['inprog']
+ .length;
+});
+
+// Turn off the failpoints. Allow the createIndexes command to return
+// InterruptedDueToReplStateChange due to stepdown, the stepped-up secondary to complete the new
+// stepdown, and the index build to succeed.
+hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum.off();
+hangAfterIndexBuildDumpsInsertsFromBulk.off();
+waitForIndexBuildToComplete();
+waitForStepDown();
+
+IndexBuildTest.assertIndexesSoon(
+ rst.getPrimary().getDB(dbName).getCollection(collName), 2, ['_id_', 'a_1']);
+IndexBuildTest.assertIndexesSoon(
+ rst.getSecondary().getDB(dbName).getCollection(collName), 2, ['_id_', 'a_1']);
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js b/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js
index 34d02c9d61a51..a7689f083d6fc 100644
--- a/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js
+++ b/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js
@@ -1,9 +1,8 @@
/**
- * Ensures that index builds can safely be aborted, for instance by the DiskSpaceMonitor, while a
- * voteCommitIndexBuild is in progress.
+ * Ensures that index builds cannot be aborted after voting for commit.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -45,7 +44,7 @@ const secondaryColl = secondaryDB.getCollection(collName);
// effectively pausing the index build on the secondary too as it will wait for the primary to
// commit or abort.
IndexBuildTest.pauseIndexBuilds(primary);
-const hangVoteCommit = configureFailPoint(primary, 'hangBeforeVoteCommitIndexBuild');
+const hangBeforeVoteCommit = configureFailPoint(primary, 'hangBeforeVoteCommitIndexBuild');
const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction;
@@ -55,7 +54,7 @@ const createIdx = IndexBuildTest.startIndexBuild(
IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1');
// Wait until secondary is voting for commit.
-hangVoteCommit.wait();
+hangBeforeVoteCommit.wait();
// Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
// Simulate a remaining disk space of 450MB on the secondary node.
@@ -68,20 +67,20 @@ assert.soon(() => {
});
IndexBuildTest.resumeIndexBuilds(primary);
-jsTestLog("Waiting for the index build to be killed");
-// "Index build: joined after abort".
-checkLog.containsJson(secondary, 20655);
+jsTestLog("Waiting for the index build kill attempt to fail");
+// "Index build: cannot force abort".
+checkLog.containsJson(secondary, 7617000);
+hangBeforeVoteCommit.off();
jsTestLog("Waiting for threads to join");
createIdx();
simulateDiskSpaceFp.off();
assert.eq(0, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
-assert.eq(1, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+assert.eq(0, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
-rst.awaitReplication();
-IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+IndexBuildTest.assertIndexesSoon(primaryColl, 2, ['_id_', 'a_1']);
+IndexBuildTest.assertIndexesSoon(secondaryColl, 2, ['_id_', 'a_1']);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_build_yield_bulk_load.js b/jstests/noPassthrough/index_build_yield_bulk_load.js
index 4d2e1bd4150ba..12fc4997162c2 100644
--- a/jstests/noPassthrough/index_build_yield_bulk_load.js
+++ b/jstests/noPassthrough/index_build_yield_bulk_load.js
@@ -58,4 +58,4 @@ awaitIndex();
awaitDrop();
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/index_build_yield_prepare_conflicts.js b/jstests/noPassthrough/index_build_yield_prepare_conflicts.js
index e28fae3d36a68..2881ae145315e 100644
--- a/jstests/noPassthrough/index_build_yield_prepare_conflicts.js
+++ b/jstests/noPassthrough/index_build_yield_prepare_conflicts.js
@@ -74,4 +74,4 @@ session.abortTransaction_forTesting();
awaitIndex();
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/index_commit_currentop_slow.js b/jstests/noPassthrough/index_commit_currentop_slow.js
index 1daea5a189167..d3db6d352ecfe 100644
--- a/jstests/noPassthrough/index_commit_currentop_slow.js
+++ b/jstests/noPassthrough/index_commit_currentop_slow.js
@@ -35,15 +35,13 @@ assert.commandWorked(coll.insert({a: 1}));
const secondary = rst.getSecondary();
IndexBuildTest.pauseIndexBuilds(secondary);
-const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
// Wait for secondary to start processing commitIndexBuild oplog entry from the primary.
const secondaryDB = secondary.getDB(testDB.getName());
assert.soon(function() {
const filter = {
'command.commitIndexBuild': {$exists: true},
- 'waitingForLatch.captureName': 'AnonymousLockable',
'$all': true,
};
const result = assert.commandWorked(secondaryDB.currentOp(filter));
diff --git a/jstests/noPassthrough/index_downgrade_fcv.js b/jstests/noPassthrough/index_downgrade_fcv.js
deleted file mode 100644
index aba8fddf284f0..0000000000000
--- a/jstests/noPassthrough/index_downgrade_fcv.js
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * If a user attempts to downgrade the server while there is an index build in progress, the
- * downgrade should succeed without blocking.
- * @tags: [
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-load('jstests/noPassthrough/libs/index_build.js');
-
-const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- },
- ]
-});
-const nodes = rst.startSet();
-rst.initiate();
-
-const primary = rst.getPrimary();
-const testDB = primary.getDB('test');
-const coll = testDB.getCollection('test');
-
-assert.commandWorked(coll.insert({a: 1}));
-
-IndexBuildTest.pauseIndexBuilds(primary);
-
-const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
-IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1');
-
-// Downgrade the primary using the setFeatureCompatibilityVersion command.
-try {
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-} finally {
- IndexBuildTest.resumeIndexBuilds(primary);
-}
-
-IndexBuildTest.waitForIndexBuildToStop(testDB);
-
-createIdx();
-
-IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
-
-// This confirms that the downgrade command will complete successfully after the index build has
-// completed.
-assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
-rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/index_drop_before_running.js b/jstests/noPassthrough/index_drop_before_running.js
index 57ec6a906bde9..a2af906258edb 100644
--- a/jstests/noPassthrough/index_drop_before_running.js
+++ b/jstests/noPassthrough/index_drop_before_running.js
@@ -2,7 +2,7 @@
* Test aborting an index build after setup but before transitioning to in-progress.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js
index be4a3aff1e15a..33e24e332c679 100644
--- a/jstests/noPassthrough/index_killop_standalone.js
+++ b/jstests/noPassthrough/index_killop_standalone.js
@@ -1,5 +1,5 @@
/**
- * Confirms that both foreground and background index builds can be aborted using killop.
+ * Confirms that index builds can be aborted using killop.
*/
(function() {
"use strict";
@@ -14,11 +14,11 @@ assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.test.insert({a: 1}));
const coll = testDB.test;
-// Test that building an index with 'options' can be aborted using killop.
-function testAbortIndexBuild(options) {
+// Test that building an index can be aborted using killop.
+function testAbortIndexBuild() {
IndexBuildTest.pauseIndexBuilds(conn);
- const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options);
+ const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1});
// When the index build starts, find its op id.
const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1');
@@ -42,7 +42,6 @@ function testAbortIndexBuild(options) {
IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
}
-testAbortIndexBuild({background: true});
-testAbortIndexBuild({background: false});
+testAbortIndexBuild();
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js b/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js
index d473ae51e5e87..0ddd895139c7c 100644
--- a/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js
+++ b/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js
@@ -4,7 +4,7 @@
* proceed to the next phase.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -73,16 +73,11 @@ createIdx();
const reasonString = `'voteAbortIndexBuild' received from '${secondary.host}'`;
checkLog.checkContainsOnceJsonStringMatch(testDB, 4656003, "error", reasonString);
-// As aborting the build involves interrupting the building thread on which the user op is waiting,
-// the user op will return before the primary has actually aborted the build. Waiting for the
-// 'createIndexes' command to return does not guarantee that the primary has replicated the abort
-// oplog entry, nor that the secondary has applied it.
-IndexBuildTest.waitForIndexBuildToStop(testDB);
-IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
-
-// Assert index does not exist.
-IndexBuildTest.assertIndexes(coll, 1, ['_id_'], []);
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_'], []);
+// Wait for the index build to eventually disappear. Due to an external abort thread doing the
+// cleanup, we can't rely on waitForIndexBuildToStop as it checks for the opId of the builder
+// thread.
+IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_'], []);
+IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_'], []);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_scan_low_priority.js b/jstests/noPassthrough/index_scan_low_priority.js
index d04f9c75c133c..4094455397e12 100644
--- a/jstests/noPassthrough/index_scan_low_priority.js
+++ b/jstests/noPassthrough/index_scan_low_priority.js
@@ -21,45 +21,66 @@ const coll = db.coll;
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
-assert.commandWorked(coll.insert({a: 0}));
-assert.commandWorked(coll.insert({a: 1}));
-assert.commandWorked(coll.createIndexes([{a: 1}, {a: -1}]));
+const runTest = function(deprioritize) {
+ assert.commandWorked(coll.insert({a: 0}));
+ assert.commandWorked(coll.insert({a: 1}));
+ assert.commandWorked(coll.createIndexes([{a: 1}, {a: -1}]));
-const numLowPriority = function() {
- return db.serverStatus().wiredTiger.concurrentTransactions.read.lowPriority.finishedProcessing;
-};
+ const numLowPriority = function() {
+ return db.serverStatus()
+ .wiredTiger.concurrentTransactions.read.lowPriority.finishedProcessing;
+ };
-const testCoveredScanDeprioritized = function(direction) {
- const numLowPriorityBefore = numLowPriority();
- coll.find().hint({a: direction}).itcount();
- assert.gt(numLowPriority(), numLowPriorityBefore);
-};
-testCoveredScanDeprioritized(1);
-testCoveredScanDeprioritized(-1);
+ const testCoveredScanDeprioritized = function(direction) {
+ const numLowPriorityBefore = numLowPriority();
+ coll.find().hint({a: direction}).itcount();
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
+ };
+ testCoveredScanDeprioritized(1);
+ testCoveredScanDeprioritized(-1);
-const testNonCoveredScanDeprioritized = function(direction) {
- const numLowPriorityBefore = numLowPriority();
- coll.find({b: 1}).hint({a: direction}).itcount();
- assert.gt(numLowPriority(), numLowPriorityBefore);
-};
-testNonCoveredScanDeprioritized(1);
-testNonCoveredScanDeprioritized(-1);
+ const testNonCoveredScanDeprioritized = function(direction) {
+ const numLowPriorityBefore = numLowPriority();
+ coll.find({b: 1}).hint({a: direction}).itcount();
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
+ };
+ testNonCoveredScanDeprioritized(1);
+ testNonCoveredScanDeprioritized(-1);
-const testScanSortLimitDeprioritized = function(direction) {
- const numLowPriorityBefore = numLowPriority();
- coll.find().hint({a: direction}).sort({a: 1}).limit(1).itcount();
- assert.gt(numLowPriority(), numLowPriorityBefore);
-};
-testScanSortLimitDeprioritized(1);
-testScanSortLimitDeprioritized(-1);
+ const testScanSortLimitDeprioritized = function(direction) {
+ const numLowPriorityBefore = numLowPriority();
+ coll.find().hint({a: direction}).sort({a: 1}).limit(1).itcount();
+ if (deprioritize) {
+ assert.gt(numLowPriority(), numLowPriorityBefore);
+ } else {
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ }
+ };
+ testScanSortLimitDeprioritized(1);
+ testScanSortLimitDeprioritized(-1);
-const testScanLimitNotDeprioritized = function(direction) {
- const numLowPriorityBefore = numLowPriority();
- coll.find().hint({a: direction}).limit(1).itcount();
- assert.eq(numLowPriority(), numLowPriorityBefore);
+ const testScanLimitNotDeprioritized = function(direction) {
+ const numLowPriorityBefore = numLowPriority();
+ coll.find().hint({a: direction}).limit(1).itcount();
+ assert.eq(numLowPriority(), numLowPriorityBefore);
+ };
+ testScanLimitNotDeprioritized(1);
+ testScanLimitNotDeprioritized(-1);
};
-testScanLimitNotDeprioritized(1);
-testScanLimitNotDeprioritized(-1);
+
+runTest(true);
+
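+// Re-run with deprioritization disabled; no scan should go through the low-priority ticket pool.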
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, deprioritizeUnboundedUserIndexScans: false}));
+runTest(false);
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js b/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js
index 4dc9a27ca77e7..b1208086bf7b1 100644
--- a/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js
+++ b/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js
@@ -3,7 +3,7 @@
* oplog entry to be replicated. If a commit entry is received instead, the secondary should crash.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/index_secondary_signal_primary_abort.js b/jstests/noPassthrough/index_secondary_signal_primary_abort.js
index 5cf7bc2fa036f..3e9b0578c3b3c 100644
--- a/jstests/noPassthrough/index_secondary_signal_primary_abort.js
+++ b/jstests/noPassthrough/index_secondary_signal_primary_abort.js
@@ -2,7 +2,7 @@
* Tests that a failing index build on a secondary node causes the primary node to abort the build.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -74,13 +74,9 @@ createIdx();
failSecondaryBuild.off();
-// Wait for the builds to be unregistered before asserting indexes.
-IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), kIndexName);
-IndexBuildTest.waitForIndexBuildToStop(secondaryDB, secondaryColl.getName(), kIndexName);
-
// Assert index does not exist.
-IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_'], []);
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_'], []);
+IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_'], []);
+IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_'], []);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js b/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js
index 6871a3d903a3e..8ba5c637d3a07 100644
--- a/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js
+++ b/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js
@@ -3,7 +3,7 @@
* properly interrupted, without blocking shutdown, and restarted after shutdown.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js b/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js
index adb535e2b75c0..089ff1a9b4b2b 100644
--- a/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js
+++ b/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js
@@ -45,7 +45,13 @@ assert.commandWorked(
// Enable fail point which makes hybrid index build to hang before it aborts.
var failPoint;
-if (TestData.setParameters.featureFlagIndexBuildGracefulErrorHandling) {
+
+const gracefulIndexBuildFeatureFlag =
+ assert
+ .commandWorked(
+ primary.adminCommand({getParameter: 1, featureFlagIndexBuildGracefulErrorHandling: 1}))
+ .featureFlagIndexBuildGracefulErrorHandling.value;
+if (gracefulIndexBuildFeatureFlag) {
// If this feature flag is enabled, index builds fail immediately instead of suppressing errors
// until the commit phase, and always signal the primary for abort (even if it is itself). Abort
// is only ever performed in the command thread, which is interrupted by replication state
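The hunk above stops relying on TestData.setParameters and instead asks the server directly whether featureFlagIndexBuildGracefulErrorHandling is enabled. Other tests touched by this patch perform the same check through the shared FeatureFlagUtil helper; an equivalent form (assuming this test were converted to an ES module, as several other tests in this patch are) would be:

// Alternative feature-flag check using the shared helper instead of a raw getParameter call.
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const gracefulIndexBuildFeatureFlag =
    FeatureFlagUtil.isEnabled(primary.getDB("admin"), "IndexBuildGracefulErrorHandling");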
diff --git a/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js b/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js
index 12fabee9ae36b..e7e026644d52b 100644
--- a/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js
+++ b/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js
@@ -103,4 +103,4 @@ IndexBuildTest.assertIndexes(newPrimary.getDB(dbName).getCollection(collName), 2
IndexBuildTest.assertIndexes(primaryColl, 2, ["_id_", "x_1"]);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/index_stepup_abort_skipped_records.js b/jstests/noPassthrough/index_stepup_abort_skipped_records.js
index b3a154ea36bb0..8e1563a958aa2 100644
--- a/jstests/noPassthrough/index_stepup_abort_skipped_records.js
+++ b/jstests/noPassthrough/index_stepup_abort_skipped_records.js
@@ -3,7 +3,7 @@
* skipped records that still cause key generation errors.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
@@ -53,11 +53,8 @@ rst.stepUp(secondary);
createIdx();
// The new primary should eventually abort the build.
-IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), kIndexName);
-IndexBuildTest.waitForIndexBuildToStop(secondaryDB, secondaryColl.getName(), kIndexName);
-
-IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_']);
+IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_']);
// Verify failure reason is due to step-up check.
checkLog.checkContainsOnceJsonStringMatch(
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 8c2a840ee752f..92c19cc04d9bb 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -56,7 +56,7 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m
assert.commandWorked(bulk.execute());
assert.eq(size, t.count());
- bgIndexBuildPid = doParallel(fullName + ".createIndex( {i:1}, {background:true} )");
+ bgIndexBuildPid = doParallel(fullName + ".createIndex( {i:1} )");
try {
// wait for indexing to start
print("wait for indexing to start");
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index e374a406bfc1a..1507ee1810b35 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -30,7 +30,7 @@ let doParallel = function(work) {
let indexBuild = function() {
let fullName = "db." + baseName;
- return doParallel(fullName + ".createIndex( {i:1}, {background:true, unique:true} )");
+ return doParallel(fullName + ".createIndex( {i:1}, {unique:true} )");
};
let doneParallel = function() {
diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js
index 423af104d6094..2b8a6a3e16d69 100644
--- a/jstests/noPassthrough/indexbg_drop.js
+++ b/jstests/noPassthrough/indexbg_drop.js
@@ -55,7 +55,7 @@ jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
primaryDB.getCollection(collection).createIndex({b: 1});
-primaryDB.getCollection(collection).createIndex({i: 1}, {background: true});
+primaryDB.getCollection(collection).createIndex({i: 1});
// Make sure the index build has started on the secondary.
IndexBuildTest.waitForIndexBuildToStart(secondDB);
diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js
deleted file mode 100644
index de0f1c66e867f..0000000000000
--- a/jstests/noPassthrough/indexbg_killop_primary.js
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Confirms that background index builds on a primary can be aborted using killop.
- * @tags: [
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-load('jstests/noPassthrough/libs/index_build.js');
-
-const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- },
- ]
-});
-const nodes = rst.startSet();
-rst.initiate();
-
-const primary = rst.getPrimary();
-const testDB = primary.getDB('test');
-const coll = testDB.getCollection('test');
-
-assert.commandWorked(coll.insert({a: 1}));
-
-IndexBuildTest.pauseIndexBuilds(primary);
-
-const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
-
-// When the index build starts, find its op id.
-const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1');
-
-IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
- jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
- assert.eq(
- undefined,
- op.connectionId,
- 'Was expecting IndexBuildsCoordinator op; found db.currentOp() for connection thread instead: ' +
- tojson(op));
- assert.eq(coll.getFullName(),
- op.ns,
- 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
-});
-
-// Index build should be present in the config.system.indexBuilds collection.
-const indexMap =
- IndexBuildTest.assertIndexes(coll, 2, ["_id_"], ["a_1"], {includeBuildUUIDs: true});
-const indexBuildUUID = indexMap['a_1'].buildUUID;
-assert(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID}));
-
-// Kill the index builder thread.
-assert.commandWorked(testDB.killOp(opId));
-
-// Wait for the index build to stop from the killop signal.
-try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
-} finally {
- IndexBuildTest.resumeIndexBuilds(primary);
-}
-
-const exitCode = createIdx({checkExitSuccess: false});
-assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
-
-// Check that no new index has been created. This verifies that the index build was aborted
-// rather than successfully completed.
-IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
-
-const cmdNs = testDB.getCollection('$cmd').getFullName();
-let ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.startIndexBuild': coll.getName()});
-assert.eq(1, ops.length, 'incorrect number of startIndexBuild oplog entries: ' + tojson(ops));
-ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.abortIndexBuild': coll.getName()});
-assert.eq(1, ops.length, 'incorrect number of abortIndexBuild oplog entries: ' + tojson(ops));
-ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.commitIndexBuild': coll.getName()});
-assert.eq(0, ops.length, 'incorrect number of commitIndexBuild oplog entries: ' + tojson(ops));
-
-// Index build should be removed from the config.system.indexBuilds collection.
-assert.isnull(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID}));
-
-rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js
deleted file mode 100644
index a404e6acd92f3..0000000000000
--- a/jstests/noPassthrough/indexbg_killop_secondary.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Confirms that aborting a background index builds on a secondary does not leave node in an
- * inconsistent state.
- * @tags: [
- * requires_replication,
- * ]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-load("jstests/libs/log.js"); // for checkLog
-load('jstests/noPassthrough/libs/index_build.js');
-
-// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
-TestData.skipEnforceFastCountOnValidate = true;
-
-const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary. This allows the primary to commit without waiting
- // for the secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821.
- },
- ]
-});
-const nodes = rst.startSet();
-rst.initiate();
-
-const primary = rst.getPrimary();
-const testDB = primary.getDB('test');
-const coll = testDB.getCollection('test');
-
-assert.commandWorked(coll.insert({a: 1}));
-
-let secondary = rst.getSecondary();
-IndexBuildTest.pauseIndexBuilds(secondary);
-
-const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
-
-// When the index build starts, find its op id.
-let secondaryDB = secondary.getDB(testDB.getName());
-const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
-
-IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => {
- jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
- assert.eq(coll.getFullName(),
- op.ns,
- 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op));
-});
-
-// Wait for the primary to complete the index build and replicate a commit oplog entry.
-// "Index build: completed successfully"
-checkLog.containsJson(primary, 20663);
-
-// Kill the index build.
-assert.commandWorked(secondaryDB.killOp(opId));
-
-const gracefulIndexBuildFlag = FeatureFlagUtil.isEnabled(testDB, "IndexBuildGracefulErrorHandling");
-if (!gracefulIndexBuildFlag) {
- // We expect this to crash the secondary because this error is not recoverable
- assert.soon(function() {
- return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0;
- });
-} else {
- // Expect the secondary to crash. Depending on timing, this can be either because the secondary
- // was waiting for a primary abort when a 'commitIndexBuild' is applied, or because the build
- // fails and tries to request an abort while a 'commitIndexBuild' is being applied.
- assert.soon(function() {
- return rawMongoProgramOutput().search(/Fatal assertion.*(7329403|7329407)/) >= 0;
- });
-}
-
-// After restarting the secondary, expect that the index build completes successfully.
-rst.stop(secondary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT});
-rst.start(secondary.nodeId, undefined, true /* restart */);
-
-secondary = rst.getSecondary();
-secondaryDB = secondary.getDB(testDB.getName());
-
-// Wait for the restarted secondary node to reach SECONDARY state again.
-rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
-// Wait for the index build to complete on all nodes.
-rst.awaitReplication();
-
-// Expect successful createIndex command invocation in parallel shell. A new index should be present
-// on the primary and secondary.
-createIdx();
-
-IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
-
-// Check that index was created on the secondary despite the attempted killOp().
-const secondaryColl = secondaryDB.getCollection(coll.getName());
-IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
-
-rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/indexbg_killop_secondary_success.js b/jstests/noPassthrough/indexbg_killop_secondary_success.js
index 22f3f5238062b..fbb122dd23a2a 100644
--- a/jstests/noPassthrough/indexbg_killop_secondary_success.js
+++ b/jstests/noPassthrough/indexbg_killop_secondary_success.js
@@ -6,10 +6,7 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load('jstests/noPassthrough/libs/index_build.js');
// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
@@ -27,9 +24,9 @@ const rst = new ReplSetTest({
slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821.
},
{
- // The arbiter prevents the primary from stepping down in the case where the secondary
- // is restarting due to the (expected) unclean shutdown. Note that the arbiter doesn't
- // participate in the commitQuorum.
+ // The arbiter prevents the primary from stepping down due to lack of majority in the
+ // case where the secondary is restarting due to the (expected) unclean shutdown. Note
+ // that the arbiter doesn't participate in the commitQuorum.
rsConfig: {
arbiterOnly: true,
},
@@ -58,7 +55,8 @@ const createIdx = (gracefulIndexBuildFlag)
// When the index build starts, find its op id.
let secondaryDB = secondary.getDB(primaryDB.getName());
-const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+const opId =
+ IndexBuildTest.waitForIndexBuildToScanCollection(secondaryDB, primaryColl.getName(), "a_1");
IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => {
jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op));
@@ -88,6 +86,7 @@ if (!gracefulIndexBuildFlag) {
}
primary = rst.getPrimary();
+rst.awaitSecondaryNodes();
primaryDB = primary.getDB('test');
primaryColl = primaryDB.getCollection('test');
@@ -113,4 +112,3 @@ if (!gracefulIndexBuildFlag) {
}
rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/indexbg_killop_stepdown.js b/jstests/noPassthrough/indexbg_killop_stepdown.js
index b54336ad90c6e..f24f7167ed744 100644
--- a/jstests/noPassthrough/indexbg_killop_stepdown.js
+++ b/jstests/noPassthrough/indexbg_killop_stepdown.js
@@ -6,14 +6,11 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load('jstests/noPassthrough/libs/index_build.js');
-const rst = new ReplSetTest({nodes: 2});
+const rst = new ReplSetTest({nodes: 3});
rst.startSet();
rst.initiate();
@@ -84,11 +81,11 @@ if (!gracefulIndexBuildFlag) {
rst.stop(
primary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT});
rst.start(primary.nodeId, undefined, true /* restart */);
-} else {
- primary = rst.waitForPrimary();
}
-// Wait for the index build to complete.
+// Wait for primary and secondaries to reach goal state, and for the index build to complete.
+primary = rst.waitForPrimary();
+rst.awaitSecondaryNodes();
rst.awaitReplication();
if (gracefulIndexBuildFlag) {
@@ -104,9 +101,6 @@ if (gracefulIndexBuildFlag) {
rst.getSecondary().getDB('test').getCollection('test'), 1, ['_id_']);
} else {
- // Wait for the index build to complete.
- rst.awaitReplication();
-
// Verify that the stepped up node completed the index build.
IndexBuildTest.assertIndexes(
rst.getPrimary().getDB('test').getCollection('test'), 2, ['_id_', 'a_1']);
@@ -117,5 +111,4 @@ if (gracefulIndexBuildFlag) {
TestData.skipEnforceFastCountOnValidate = true;
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/indexbg_killop_stepup.js b/jstests/noPassthrough/indexbg_killop_stepup.js
index bd5d73035c0e8..ae7bd543eb8d0 100644
--- a/jstests/noPassthrough/indexbg_killop_stepup.js
+++ b/jstests/noPassthrough/indexbg_killop_stepup.js
@@ -34,10 +34,8 @@ IndexBuildTest.pauseIndexBuilds(secondary);
let waitForCommitReadinessFP =
configureFailPoint(primary, "hangIndexBuildAfterSignalPrimaryForCommitReadiness");
-const awaitIndexBuild =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true}, [
- ErrorCodes.InterruptedDueToReplStateChange
- ]);
+const awaitIndexBuild = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), {a: 1}, {}, [ErrorCodes.InterruptedDueToReplStateChange]);
// When the index build starts, find its op id.
let secondaryDB = secondary.getDB(testDB.getName());
@@ -81,11 +79,14 @@ awaitStepUp();
// Wait for the index build to be aborted before asserting that it doesn't exist.
IndexBuildTest.waitForIndexBuildToStop(secondaryDB, coll.getName(), "a_1");
-rst.awaitReplication();
-IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
const secondaryColl = secondaryDB.getCollection(coll.getName());
-IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+// Although the index build is aborted on the secondary that is stepping up, as of
+// featureFlagIndexBuildGracefulErrorHandling the abort is replicated to the secondaries (via an
+// 'abortIndexBuild' oplog entry) asynchronously with respect to the index builder thread on the
+// primary. Wait for the secondaries to complete the abort.

+IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']);
+IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_']);
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/interrupt_compact_commands.js b/jstests/noPassthrough/interrupt_compact_commands.js
new file mode 100644
index 0000000000000..e579aa4062806
--- /dev/null
+++ b/jstests/noPassthrough/interrupt_compact_commands.js
@@ -0,0 +1,131 @@
+/**
+ * Tests that the compact command is interruptible in the storage engine (WT) layer.
+ * Loads data such that the storage engine compact command finds data to compress and actually runs.
+ * Pauses a compact command in the MDB layer, sets interrupt via killOp, and then releases the
+ * command to discover the interrupt in the storage engine layer.
+ *
+ * @tags: [requires_persistence]
+ */
+
+(function() {
+"use strict";
+
+load("jstests/libs/fail_point_util.js");
+load("jstests/libs/parallelTester.js");
+
+/**
+ * Loads 25000 * 20 documents into the test collection via 20 threads.
+ * Tags each insert with a thread ID. Then deletes half the data, by thread ID, to create holes such
+ * that WT::compact finds compaction work to do.
+ */
+function loadData(conn, dbName, collName, coll) {
+ const kThreads = 20;
+
+ coll.createIndex({t: 1});
+
+ jsTestLog("Loading data...");
+
+ const threads = [];
+ for (let t = 0; t < kThreads; t++) {
+ let thread = new Thread(function(t, port, dbName, collName) {
+ const mongo = new Mongo('localhost:' + port);
+ const testDB = mongo.getDB(dbName);
+ const testColl = testDB.getCollection(collName);
+
+ // This is a sufficient amount of data for WT::compact to run. If the data size is too
+ // small, WT::compact skips.
+ const size = 500;
+ const count = 25000;
+ const doc = {a: -1, x: 'x'.repeat(size), b: -1, t: t};
+
+ let bulkInsert = testColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < count; ++i) {
+ bulkInsert.insert(doc);
+ }
+ jsTestLog("Committing inserts, t: " + t);
+ assert.commandWorked(bulkInsert.execute());
+ }, t, conn.port, dbName, collName);
+ threads.push(thread);
+ thread.start();
+ }
+ for (let t = 0; t < kThreads; ++t) {
+ threads[t].join();
+ }
+
+ jsTestLog("Pruning data...");
+
+ for (var t = 0; t < kThreads; t = t + 2) {
+ coll.deleteMany({t: t});
+ }
+
+ jsTestLog("Data setup complete.");
+}
+
+const dbName = jsTestName();
+const collName = 'testColl';
+
+const conn = MongoRunner.runMongod();
+assert.neq(conn, null);
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+loadData(conn, dbName, collName, testColl);
+
+let fp;
+let fpOn = false;
+try {
+ jsTestLog("Setting the failpoint...");
+ fp = configureFailPoint(testDB, "pauseCompactCommandBeforeWTCompact");
+ fpOn = true;
+ TestData.comment = "commentOpIdentifier";
+ TestData.dbName = dbName;
+
+ let compactJoin = startParallelShell(() => {
+ jsTestLog("Starting the compact command, which should stall on a failpoint...");
+ assert.commandFailedWithCode(
+ db.getSiblingDB(TestData.dbName)
+ .runCommand({"compact": "testColl", "comment": TestData.comment}),
+ ErrorCodes.Interrupted);
+ }, conn.port);
+
+ jsTestLog("Waiting for the compact command to hit the failpoint...");
+ fp.wait();
+
+ jsTestLog("Finding the compact command opId in order to call killOp...");
+ let opId = null;
+ assert.soon(function() {
+ const ops = testDB.getSiblingDB("admin")
+ .aggregate([
+ {$currentOp: {allUsers: true}},
+ {$match: {"command.comment": TestData.comment}}
+ ])
+ .toArray();
+ if (ops.length == 0) {
+ return false;
+ }
+ assert.eq(ops.length, 1);
+ opId = ops[0].opid;
+ return true;
+ });
+ jsTestLog("Calling killOp to interrupt the compact command, opId: " + tojson(opId));
+ assert.commandWorked(testDB.killOp(opId));
+
+ jsTestLog("Releasing the failpoint and waiting for the compact command to finish...");
+ fp.off();
+ fpOn = false;
+
+ compactJoin();
+
+ // Make sure that WT::compact did not skip because of too little data.
+ assert(
+ !checkLog.checkContainsOnce(testDB, "there is no useful work to do - skipping compaction"));
+} finally {
+ if (fpOn) {
+ jsTestLog("Release the failpoint");
+ fp.off();
+ }
+}
+
+jsTestLog("Done");
+MongoRunner.stopMongod(conn);
+})();
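Distilled, the interruption pattern exercised by the new test above has four steps: pause the command at a failpoint, locate the paused operation by its comment, interrupt it with killOp, then release the failpoint. A condensed sketch using the names from the test (the full test additionally retries the $currentOp lookup with assert.soon):

const fp = configureFailPoint(testDB, "pauseCompactCommandBeforeWTCompact");
const awaitCompact = startParallelShell(
    () => db.getSiblingDB(TestData.dbName)
              .runCommand({compact: "testColl", comment: TestData.comment}),
    conn.port);
fp.wait();
// Find the paused compact command by its comment, then interrupt it.
const op = testDB.getSiblingDB("admin")
               .aggregate([
                   {$currentOp: {allUsers: true}},
                   {$match: {"command.comment": TestData.comment}}
               ])
               .toArray()[0];
assert.commandWorked(testDB.killOp(op.opid));
fp.off();
awaitCompact();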
diff --git a/jstests/noPassthrough/interrupt_while_yielded.js b/jstests/noPassthrough/interrupt_while_yielded.js
index b063fb98b8c96..69e1332e179f9 100644
--- a/jstests/noPassthrough/interrupt_while_yielded.js
+++ b/jstests/noPassthrough/interrupt_while_yielded.js
@@ -1,7 +1,7 @@
/**
* @tags: [
- * # TODO SERVER-64007: Support yielding in CQF plans.
- * cqf_incompatible,
+ * # TODO SERVER-70446: Enable yielding for index plans in CQF.
+ * cqf_experimental_incompatible,
* ]
*/
(function() {
diff --git a/jstests/noPassthrough/list_collections_large_number.js b/jstests/noPassthrough/list_collections_large_number.js
index 379d4ea5dd7a6..2b0b5d53926cd 100644
--- a/jstests/noPassthrough/list_collections_large_number.js
+++ b/jstests/noPassthrough/list_collections_large_number.js
@@ -31,4 +31,4 @@ assert.commandWorked(db.runCommand({"listCollections": 1}));
// Do not validate collections since that is an expensive action.
MongoRunner.stopMongod(conn, undefined, {skipValidation: true});
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/list_indexes_index_build_info.js b/jstests/noPassthrough/list_indexes_index_build_info.js
index 69d2c1cc7f103..d0da35470bbd6 100644
--- a/jstests/noPassthrough/list_indexes_index_build_info.js
+++ b/jstests/noPassthrough/list_indexes_index_build_info.js
@@ -248,7 +248,7 @@ try {
'unique index info does not contain replicationState: ' + tojson(uniqueIndexBuildInfo));
const replicationState = uniqueIndexBuildInfo.replicationState;
assert.eq(replicationState.state,
- 'Aborted',
+ 'External abort',
'Unexpected replication state: ' + tojson(uniqueIndexBuildInfo));
assert(replicationState.hasOwnProperty('timestamp'),
'replication state should contain abort timestamp: ' + tojson(uniqueIndexBuildInfo));
diff --git a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
index 17ca6f983d588..26e8e7372c2ac 100644
--- a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
+++ b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
@@ -27,8 +27,7 @@ IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
assert.commandWorked(coll.insert({a: 1}));
IndexBuildTest.pauseIndexBuilds(conn);
-const createIdx =
- IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true});
+const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1});
IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'b_1');
// The listIndexes command supports returning all indexes, including ones that are not ready.
diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js
index 385acd89ce345..f74901a8d462b 100644
--- a/jstests/noPassthrough/list_indexes_with_build_uuids.js
+++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js
@@ -53,7 +53,7 @@ replSet.awaitReplication();
// Build and finish the first index.
assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName, background: true}]}));
+ {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName}]}));
replSet.awaitReplication();
// Start hanging index builds on the secondary.
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js
index 82dce784dfd50..0418f778b3482 100644
--- a/jstests/noPassthrough/log_and_profile_query_hash.js
+++ b/jstests/noPassthrough/log_and_profile_query_hash.js
@@ -5,15 +5,11 @@
* requires_profiling,
* assumes_read_preference_unchanged,
* # TODO SERVER-67607: support query hash in slow query log lines.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
// For getLatestProfilerEntry().
load("jstests/libs/profiler.js");
-load("jstests/libs/sbe_util.js");
// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
// of data logged for each op. For some of the testcases below, including the cluster time would
@@ -154,5 +150,4 @@ const creationLogList = log.filter(
logLine.indexOf('"queryHash":"' + String(onCreationHashes.queryHash)) != -1));
assert.eq(1, creationLogList.length);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/loglong.js b/jstests/noPassthrough/loglong.js
index 7e53701d19948..a807d9399384b 100644
--- a/jstests/noPassthrough/loglong.js
+++ b/jstests/noPassthrough/loglong.js
@@ -53,4 +53,4 @@ function assertLogTruncated(db, t) {
assert(found, tojson(log));
}
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/lookup_max_intermediate_size.js b/jstests/noPassthrough/lookup_max_intermediate_size.js
index c99a1e836dbb7..08cea2270c369 100644
--- a/jstests/noPassthrough/lookup_max_intermediate_size.js
+++ b/jstests/noPassthrough/lookup_max_intermediate_size.js
@@ -5,10 +5,7 @@
// ]
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-
-(function() {
-"use strict";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// Used by testPipeline to sort result documents. All _ids must be primitives.
function compareId(a, b) {
@@ -119,4 +116,3 @@ assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'
runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from, 4568);
sharded.stop();
-}());
diff --git a/jstests/noPassthrough/lookup_metrics.js b/jstests/noPassthrough/lookup_metrics.js
index 9f2aec0bfdb7d..ca6bf81d04685 100644
--- a/jstests/noPassthrough/lookup_metrics.js
+++ b/jstests/noPassthrough/lookup_metrics.js
@@ -2,11 +2,7 @@
* Tests that the lookup metrics are recorded correctly in serverStatus.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' and other explain helpers.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: true}});
assert.neq(null, conn, "mongod was unable to start up");
@@ -17,7 +13,7 @@ if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because either the sbe lookup pushdown feature flag is disabled or" +
" sbe itself is disabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(db.dropDatabase());
@@ -142,5 +138,4 @@ assert.eq(
4 /* Matching results */);
compareLookupCounters(expectedCounters);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js
index 641c73d5ff2ad..42979d3051249 100644
--- a/jstests/noPassthrough/lookup_pushdown.js
+++ b/jstests/noPassthrough/lookup_pushdown.js
@@ -3,11 +3,14 @@
*
* @tags: [requires_sharding, uses_transactions]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' and other explain helpers.
+import {
+ aggPlanHasStage,
+ getAggPlanStage,
+ getAggPlanStages,
+ hasRejectedPlans,
+ planHasStage,
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const JoinAlgorithm = {
Classic: 0,
@@ -116,7 +119,7 @@ const sbeEnabled = checkSBEEnabled(db);
if (!sbeEnabled) {
jsTestLog("Skipping test because SBE is disabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
let coll = db[name];
@@ -1024,5 +1027,4 @@ assert.commandWorked(db.createView(shardedViewName, name, [{$match: {b: {$gte: 0
[{$lookup: {from: shardedViewName, localField: "a", foreignField: "b", as: "out"}}],
JoinAlgorithm.Classic /* expectedJoinAlgorithm */);
}());
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/lookup_with_limit_sharded.js b/jstests/noPassthrough/lookup_with_limit_sharded.js
index 6846db7f0f872..4b9e348e44ad0 100644
--- a/jstests/noPassthrough/lookup_with_limit_sharded.js
+++ b/jstests/noPassthrough/lookup_with_limit_sharded.js
@@ -12,9 +12,13 @@
* requires_sharding,
* ]
*/
-(function() {
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStages().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {
+ flattenQueryPlanTree,
+ getAggPlanStages,
+ getPlanStage,
+ getWinningPlan
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const st = new ShardingTest({shards: 2, config: 1});
const db = st.s.getDB("test");
@@ -22,7 +26,7 @@ const db = st.s.getDB("test");
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE $lookup is not enabled.");
st.stop();
- return;
+ quit();
}
const coll = db.lookup_with_limit;
@@ -127,4 +131,3 @@ checkShardedResults(sortPipeline, 0);
checkShardedResults(topKSortPipeline, 2);
st.stop();
-}());
diff --git a/jstests/noPassthrough/match_expression_optimization_failpoint.js b/jstests/noPassthrough/match_expression_optimization_failpoint.js
index 590102ba8e823..622001b792d18 100644
--- a/jstests/noPassthrough/match_expression_optimization_failpoint.js
+++ b/jstests/noPassthrough/match_expression_optimization_failpoint.js
@@ -1,9 +1,5 @@
// Tests that match expression optimization works properly when the failpoint isn't triggered, and
// is disabled properly when it is triggered.
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
Random.setRandomSeed();
const conn = MongoRunner.runMongod({});
@@ -38,5 +34,4 @@ const disabledResult = coll.aggregate(pipeline).toArray();
// Test that the result is the same with and without optimizations enabled (result is sorted).
assert.eq(enabledResult, disabledResult);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/merge_on_secondary.js b/jstests/noPassthrough/merge_on_secondary.js
index 14f3454a1387d..0cf46c4c1c2c2 100644
--- a/jstests/noPassthrough/merge_on_secondary.js
+++ b/jstests/noPassthrough/merge_on_secondary.js
@@ -74,4 +74,4 @@ assert(!res.hasOwnProperty("writeErrors"));
assert(!res.hasOwnProperty("writeConcernError"));
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
index f275244f78f75..ad2010bcf0ff7 100644
--- a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
+++ b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js
@@ -91,4 +91,4 @@ const outStage = `{$out: "${outputCollName}"}`;
runTest(outStage, outFailPoint);
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/metadata_size_estimate.js b/jstests/noPassthrough/metadata_size_estimate.js
new file mode 100644
index 0000000000000..a052fe0c17337
--- /dev/null
+++ b/jstests/noPassthrough/metadata_size_estimate.js
@@ -0,0 +1,57 @@
+// Test the impact of having too many roles on write batching and metadata size estimation.
+// @tags: [requires_sharding]
+
+(function() {
+'use strict';
+
+// Use a relatively small record size to more reliably hit a tipping point where the write batching
+// logic thinks we have more space available for metadata than we really do. Note also that by using
+// small records, we are verifying that the batching logic is accounting for the overhead required
+// to serialize each document into a BSONArray.
+const kDataBlockSize = 4 * 1024;
+const kDataBlock = 'x'.repeat(kDataBlockSize);
+const kBSONMaxObjSize = 16 * 1024 * 1024;
+const kNumRows = (kBSONMaxObjSize / kDataBlockSize) + 5;
+
+function runTest(conn) {
+ const admin = conn.getDB('admin');
+ assert.commandWorked(admin.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']}));
+ assert(admin.auth('admin', 'pwd'));
+
+ // Create more than 16KB of role data.
+ // These roles are grouped into a meta-role to avoid calls to `usersInfo` unexpectedly
+ // overflowing from duplication of roles/inheritedRoles plus showPrivileges.
+ const userRoles = [];
+ for (let i = 0; i < 10000; ++i) {
+ userRoles.push({db: 'qwertyuiopasdfghjklzxcvbnm_' + i, role: 'read'});
+ }
+ assert.commandWorked(
+ admin.runCommand({createRole: 'bigRole', roles: userRoles, privileges: []}));
+ assert.commandWorked(admin.runCommand({createUser: 'user', pwd: 'pwd', roles: ['bigRole']}));
+ admin.logout();
+
+ assert(admin.auth('user', 'pwd'));
+ const db = conn.getDB(userRoles[0].db);
+
+ // Fill a collection with enough rows to necessitate paging.
+ for (let i = 1; i <= kNumRows; ++i) {
+ assert.commandWorked(db.myColl.insert({_id: i, data: kDataBlock}));
+ }
+ // Verify initial write.
+ assert.eq(kNumRows, db.myColl.count({}));
+
+ // Create an aggregation which will batch up to kMaxWriteBatchSize or 16MB
+ // (not counting metadata)
+ assert.eq(0, db.myColl.aggregate([{"$out": 'yourColl'}]).itcount(), 'Aggregation failed');
+
+ // Verify the $out stage completed.
+ assert.eq(db.myColl.count({}), db.yourColl.count({}));
+ assert.eq(kNumRows, db.yourColl.count({}));
+}
+
+{
+ const st = new ShardingTest({mongos: 1, config: 1, shards: 1});
+ runTest(st.s0);
+ st.stop();
+}
+})();
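The constants in the new test above are chosen so that the inserted payload just exceeds a single maximum-size BSON batch, forcing the $out batching logic to split the writes while still accounting for per-document metadata overhead. A quick check of that arithmetic (illustrative only, mirroring the constants defined in the test):

// 16 MiB / 4 KiB = 4096 documents' payload fills one max-size BSON object; kNumRows adds 5
// more, so the total (~16.02 MiB) cannot fit in a single batch and must be split.
const kDataBlockSize = 4 * 1024;
const kBSONMaxObjSize = 16 * 1024 * 1024;
const kNumRows = (kBSONMaxObjSize / kDataBlockSize) + 5;
assert.eq(4101, kNumRows);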
diff --git a/jstests/noPassthrough/mirror_reads.js b/jstests/noPassthrough/mirror_reads.js
index 4a27724aeef41..9122e6678b423 100644
--- a/jstests/noPassthrough/mirror_reads.js
+++ b/jstests/noPassthrough/mirror_reads.js
@@ -77,18 +77,19 @@ function sendAndCheckReads({rst, cmd, minRate, maxRate, burstCount}) {
return ((readsPending == 0) && (readsSent === readsResolved));
}, "Did not resolve all requests within time limit", 10000);
- // The number of mirrored reads processed across all secondaries.
- let readsProcessedAsSecondaryTotal = 0;
- for (let i = 0; i < secondaries.length; i++) {
- const currentSecondaryMirroredReadsStats = getMirroredReadsStats(secondaries[i]);
- const processedAsSecondary = currentSecondaryMirroredReadsStats.processedAsSecondary -
- initialProcessedAsSecondary[i];
- jsTestLog("Verifying number of reads processed by secondary " + secondaries[i] + ": " +
- tojson({processedAsSecondary: processedAsSecondary}));
- readsProcessedAsSecondaryTotal += processedAsSecondary;
- }
- assert.eq(readsProcessedAsSecondaryTotal, readsSucceeded);
- assert.eq(readsProcessedAsSecondaryTotal, readsSent);
+ assert.soon(() => {
+ // The number of mirrored reads processed across all secondaries.
+ let readsProcessedAsSecondaryTotal = 0;
+ for (let i = 0; i < secondaries.length; i++) {
+ const currentSecondaryMirroredReadsStats = getMirroredReadsStats(secondaries[i]);
+ const processedAsSecondary = currentSecondaryMirroredReadsStats.processedAsSecondary -
+ initialProcessedAsSecondary[i];
+ jsTestLog("Verifying number of reads processed by secondary " + secondaries[i] + ": " +
+ tojson({processedAsSecondary: processedAsSecondary}));
+ readsProcessedAsSecondaryTotal += processedAsSecondary;
+ }
+ return readsProcessedAsSecondaryTotal == readsSucceeded && readsSucceeded == readsSent;
+ }, "Read metrics across secondaries did not converge to expected results", 10000);
jsTestLog("Verifying primary statistics: " +
tojson({current: currentPrimaryMirroredReadsStats, start: initialPrimaryStats}));
diff --git a/jstests/noPassthrough/mongobridge_testcommands.js b/jstests/noPassthrough/mongobridge_testcommands.js
index a76a89e8486be..749a02bf5632f 100644
--- a/jstests/noPassthrough/mongobridge_testcommands.js
+++ b/jstests/noPassthrough/mongobridge_testcommands.js
@@ -5,6 +5,8 @@
* @tags: [
* requires_replication,
* requires_sharding,
+ * # Tests running with experimental CQF behavior require test commands to be enabled.
+ * cqf_experimental_incompatible,
* ]
*/
diff --git a/jstests/noPassthrough/mr_disk_use.js b/jstests/noPassthrough/mr_disk_use.js
index f2d178aa9c2de..22a867b506b9e 100644
--- a/jstests/noPassthrough/mr_disk_use.js
+++ b/jstests/noPassthrough/mr_disk_use.js
@@ -40,4 +40,4 @@ const res = assert.commandWorked(db.runCommand(mapReduceCmd));
assert.eq(res.results[0], {_id: "a", value: 42}, res);
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/mr_mutable_properties.js b/jstests/noPassthrough/mr_mutable_properties.js
index ac33778de5bd0..3564f5994a0f5 100644
--- a/jstests/noPassthrough/mr_mutable_properties.js
+++ b/jstests/noPassthrough/mr_mutable_properties.js
@@ -17,15 +17,30 @@ const map = function() {
};
const reduce = function(key, values) {
- // set property on receiver
+ // Deal with the possibility that the input 'values' may have already been partially reduced.
+ values = values.reduce(function(acc, current) {
+ if (current.hasOwnProperty("food")) {
+ return acc.concat(current.food);
+ } else {
+ acc.push(current);
+ return acc;
+ }
+ }, []);
+
+ // Set property on receiver.
this.feed = {beat: 1};
- // set property on key arg
+ // Set property on key arg.
key.fed = {mochi: 1};
- // push properties onto values array arg
- values.push(this.feed);
- values.push(key.fed);
+ // Push properties onto values array arg, if they are not present in the array already due to
+ // an earlier reduction.
+ if (!values.some(obj => obj.hasOwnProperty("beat"))) {
+ values.push(this.feed);
+ }
+ if (!values.some(obj => obj.hasOwnProperty("mochi"))) {
+ values.push(key.fed);
+ }
// modify each value in the (modified) array arg
values.forEach(function(val) {
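The rewritten reducer above has to cope with mapReduce's re-reduce behavior: the engine may invoke reduce again on values that are themselves earlier reduce outputs, so the function can no longer assume every element is a raw mapped value. A minimal illustration of a re-reduce-safe reducer (a hypothetical example, not code from this test):

// Safe under re-reduce because the output has the same shape as each input element, so
// reduce(key, [reduce(key, a), reduce(key, b)]) equals reduce(key, a.concat(b)).
const reduceSum = function(key, values) {
    let total = 0;
    values.forEach(function(v) {
        // Works for both mapped values ({count: 1}) and prior reduce outputs ({count: n}).
        total += v.count;
    });
    return {count: total};
};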
diff --git a/jstests/noPassthrough/nested_sort_merge.js b/jstests/noPassthrough/nested_sort_merge.js
index 5d2568e690643..464ee2d0ace86 100644
--- a/jstests/noPassthrough/nested_sort_merge.js
+++ b/jstests/noPassthrough/nested_sort_merge.js
@@ -2,9 +2,7 @@
* Verifies that nested SORT_MERGE plans are handled correctly by the SBE stage builder.
* Intended to reproduce SERVER-61496.
*/
-(function() {
-
-load("jstests/libs/analyze_plan.js"); // for 'getPlanStages'.
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -97,5 +95,4 @@ for (const doc of queries) {
}
}
}
-MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/noFetchBonus.js b/jstests/noPassthrough/noFetchBonus.js
index 9dd1c197d725e..fd0fa80e60479 100644
--- a/jstests/noPassthrough/noFetchBonus.js
+++ b/jstests/noPassthrough/noFetchBonus.js
@@ -2,10 +2,7 @@
// requires_replication,
// requires_sharding,
// ]
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
+import {getRejectedPlans, isIndexOnly, planHasStage} from "jstests/libs/analyze_plan.js";
const st = new ShardingTest({shards: 1, rs: {nodes: 1}, config: 1});
const db = st.s.getDB("test");
@@ -27,5 +24,4 @@ assert.eq(rejected.length, 1, rejected);
assert(planHasStage(db, rejected[0], 'SHARDING_FILTER'), explain);
assert(planHasStage(db, rejected[0], 'FETCH'), rejected);
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js b/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js
new file mode 100644
index 0000000000000..4e298a3a9a0bb
--- /dev/null
+++ b/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js
@@ -0,0 +1,54 @@
+/**
+ * Tests that query planning fails when an $or has a text child along with an unindexed child.
+ *
+ * @tags: [
+ * requires_fcv_71,
+ * ]
+ */
+(function() {
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+
+const db = conn.getDB("test");
+const coll = db.getCollection(jsTestName());
+coll.drop();
+
+assert.commandWorked(coll.insert({x: 1}));
+
+assert.commandWorked(coll.createIndex({"$**": "text"}));
+
+assert.commandWorked(coll.createIndex({"indexed": 1}));
+
+const pipeline = [
+ {
+ $match: {
+ $and: [{
+ $and: [
+ {"indexed": {$eq: 1}},
+ {
+ $or: [
+ {$text: {$search: "abcd"}},
+ {"unindexed": {$eq: 1}},
+ ]
+ },
+ ]
+ }]
+ }
+ },
+];
+
+assert.throwsWithCode(function() {
+ coll.aggregate(pipeline);
+}, ErrorCodes.NoQueryExecutionPlans);
+
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
+
+assert.throwsWithCode(function() {
+ coll.aggregate(pipeline);
+}, ErrorCodes.NoQueryExecutionPlans);
+
+MongoRunner.stopMongod(conn);
+})();
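The new test asserts that planning fails with NoQueryExecutionPlans because the $or mixes a $text branch with a predicate on the unindexed field 'unindexed': when an $or contains $text, every branch must be satisfiable by an index. For contrast (an illustrative addition, not part of the test), the same shape plans successfully once both branches are index-backed:

// Both $or children use an index here ($text via the wildcard text index, 'indexed' via the
// {indexed: 1} index created in the test above), so this aggregate succeeds.
assert.doesNotThrow(() => coll.aggregate([{
    $match: {$or: [{$text: {$search: "abcd"}}, {indexed: {$eq: 1}}]}
}]).itcount());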
diff --git a/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js b/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js
index a17ee3ed3c1bd..09d3ec293f38a 100644
--- a/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js
+++ b/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js
@@ -2,10 +2,8 @@
* Tests that we can execute a query which survived a yield using an index scan on a path containing
* a positional component. This test was designed to reproduce SERVER-52589.
*/
-(function() {
-"use strict";
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js"); // For explain helpers.
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
// Configure 'internalQueryExecYieldIterations' such that operations will yield on each PlanExecutor
@@ -59,5 +57,4 @@ assert.eq(ixscan.isMultiKey, false, explain);
// Now execute the query and validate the result.
assertArrayEq({actual: cursor.toArray(), expected: [doc]});
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
index 77e6709aa9f28..a20fa003ed825 100644
--- a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
+++ b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
@@ -6,14 +6,11 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
// Deliberately inserts orphans outside of migration.
TestData.skipCheckOrphans = true;
-load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage().
-
// Set up a 2-shard cluster.
const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}});
@@ -150,4 +147,3 @@ runSampleAndConfirmResults({
});
st.stop();
-})();
diff --git a/jstests/noPassthrough/out_majority_read_replset.js b/jstests/noPassthrough/out_majority_read_replset.js
index 496520f8a4391..989cd0aedc3bf 100644
--- a/jstests/noPassthrough/out_majority_read_replset.js
+++ b/jstests/noPassthrough/out_majority_read_replset.js
@@ -6,7 +6,6 @@
"use strict";
load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
-load("jstests/libs/feature_flag_util.js");
const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
@@ -57,14 +56,6 @@ const awaitShell = startParallelShell(`{
}`,
db.getMongo().port);
-// Wait for the $out before restarting the replication when not using point-in-time reads.
-if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assert.soon(function() {
- const filter = {"command.aggregate": "sourceColl"};
- return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
- });
-}
-
// Restart data replication and wait until the new write becomes visible.
restartReplicationOnSecondaries(rst);
rst.awaitLastOpCommitted();
diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js
index 826604cc69434..0fe1c412a281e 100644
--- a/jstests/noPassthrough/out_merge_majority_read.js
+++ b/jstests/noPassthrough/out_merge_majority_read.js
@@ -13,8 +13,9 @@
(function() {
'use strict';
-// Skip metadata consistency check since the sharded clsuter is started with 0 shards
+// Skip metadata consistency checks since the sharded cluster is started with 0 shards
TestData.skipCheckMetadataConsistency = true;
+TestData.skipCheckRoutingTableConsistency = true;
const testServer = MongoRunner.runMongod();
const db = testServer.getDB("test");
diff --git a/jstests/noPassthrough/out_merge_on_secondary_metadata.js b/jstests/noPassthrough/out_merge_on_secondary_metadata.js
index 3d9727bef7bcf..9668c16fd411a 100644
--- a/jstests/noPassthrough/out_merge_on_secondary_metadata.js
+++ b/jstests/noPassthrough/out_merge_on_secondary_metadata.js
@@ -115,4 +115,4 @@ const outPipeline = [{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outCollN
testMetadata(outPipeline, "out_on_secondary_metadata");
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/out_merge_on_secondary_write_concern.js b/jstests/noPassthrough/out_merge_on_secondary_write_concern.js
index 1229d898a7fb3..13baa6f381996 100644
--- a/jstests/noPassthrough/out_merge_on_secondary_write_concern.js
+++ b/jstests/noPassthrough/out_merge_on_secondary_write_concern.js
@@ -75,4 +75,4 @@ const outPipeline = [{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outColl.
testWriteConcern(outPipeline, "out_on_secondary_write_concern");
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/parse_zone_info.js b/jstests/noPassthrough/parse_zone_info.js
index bdfdfd5cd34e7..5e88bb6f24e63 100644
--- a/jstests/noPassthrough/parse_zone_info.js
+++ b/jstests/noPassthrough/parse_zone_info.js
@@ -43,7 +43,7 @@ function testWithGoodTimeZoneDir(tz_good_path) {
// changes from the slim-format files. This was fixed in the timelib 2021 series.
const corner_coll = testDB.parse_zone_info_corner_cases;
- test_dates = [
+ let test_dates = [
{
test_date: "2020-10-20T19:49:47.634Z",
test_date_parts: {
diff --git a/jstests/noPassthrough/partial_unique_indexes.js b/jstests/noPassthrough/partial_unique_indexes.js
index 3aa5d53f94cf5..04a0f866101e2 100644
--- a/jstests/noPassthrough/partial_unique_indexes.js
+++ b/jstests/noPassthrough/partial_unique_indexes.js
@@ -49,7 +49,7 @@ assert.commandWorked(testDB.adminCommand(
{configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}}));
assert.commandWorked(testDB.adminCommand(
{configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}}));
-res = benchRun(benchArgs);
+let res = benchRun(benchArgs);
printjson({res});
assert.commandWorked(
diff --git a/jstests/noPassthrough/pin_code_segments_on_startup.js b/jstests/noPassthrough/pin_code_segments_on_startup.js
index 41a6826ca74fc..9135fae85ba7d 100644
--- a/jstests/noPassthrough/pin_code_segments_on_startup.js
+++ b/jstests/noPassthrough/pin_code_segments_on_startup.js
@@ -2,7 +2,8 @@
* Tests that a standalone mongod is able to pin code segments on startup when
* 'lockCodeSegmentsInMemory=true'.
* TODO (SERVER-75632): Re-enable this test on amazon linux once ulimits are configured.
- * @tags: [incompatible_with_macos, incompatible_with_windows_tls, incompatible_with_amazon_linux]
+ * @tags: [requires_increased_memlock_limits, incompatible_with_macos,
+ * incompatible_with_windows_tls, incompatible_with_amazon_linux]
*/
(function() {
diff --git a/jstests/noPassthrough/pipeline_optimization_failpoint.js b/jstests/noPassthrough/pipeline_optimization_failpoint.js
index 543bc9d6a397c..6c5bb123db824 100644
--- a/jstests/noPassthrough/pipeline_optimization_failpoint.js
+++ b/jstests/noPassthrough/pipeline_optimization_failpoint.js
@@ -1,9 +1,7 @@
// Tests that pipeline optimization works properly when the failpoint isn't triggered, and is
// disabled properly when it is triggered.
-(function() {
-"use strict";
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
Random.setRandomSeed();
const conn = MongoRunner.runMongod({});
@@ -61,5 +59,4 @@ const disabledResult = coll.aggregate(pipeline).toArray();
// Test that the result is the same with and without optimizations enabled.
assert.eq(enabledResult, disabledResult);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/plan_cache_group_lookup.js b/jstests/noPassthrough/plan_cache_group_lookup.js
index 8975bdf96a0d8..26f7400b708ec 100644
--- a/jstests/noPassthrough/plan_cache_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_group_lookup.js
@@ -2,14 +2,11 @@
* Test that plans with $group and $lookup lowered to SBE are cached and invalidated correctly.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -19,7 +16,7 @@ const foreignColl = db.plan_cache_pipeline_foreign;
if (!checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(coll.insert({a: 1}));
@@ -225,5 +222,4 @@ const groupStage = {
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "trySbeEngine"}));
})();
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js b/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js
index 78b08e13533b9..0516603fd993c 100644
--- a/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js
+++ b/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js
@@ -3,14 +3,11 @@
* is recovered from the plan cache.
*
* @tags: [
- * # Bonsai optimizer cannot use the plan cache yet.
+ * # TODO SERVER-67607: Test plan cache with CQF enabled.
* cqf_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js");
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({});
const db = conn.getDB("plan_cache_hits_and_misses_metrics");
@@ -169,5 +166,4 @@ function runCommandAndCheckPlanCacheMetric(
},
].forEach(testCase => runCommandAndCheckPlanCacheMetric(testCase));
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index 7a37e49bfc465..6ecd6e366d971 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -4,14 +4,11 @@
* @tags: [
* requires_replication,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getCachedPlan().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getCachedPlan} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const dbName = "test";
const collName = "coll";
@@ -63,10 +60,7 @@ function runTest({rst, readDB, writeDB}) {
// single plan exists.
assert.commandWorked(writeDB.runCommand({
createIndexes: collName,
- indexes: [
- {key: {y: 1}, name: "less_selective", background: false},
- {key: {z: 1}, name: "least_selective", background: false}
- ],
+ indexes: [{key: {y: 1}, name: "less_selective"}, {key: {z: 1}, name: "least_selective"}],
writeConcern: {w: "majority"}
}));
@@ -102,7 +96,7 @@ function runTest({rst, readDB, writeDB}) {
const testDB = db.getSiblingDB(TestData.dbName);
assert.commandWorked(testDB.runCommand({
createIndexes: TestData.collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: true}],
+ indexes: [{key: {x: 1}, name: "most_selective"}],
writeConcern: {w: "majority"}
}));
}, writeDB.getMongo().port);
@@ -155,7 +149,7 @@ function runTest({rst, readDB, writeDB}) {
// Build a "most selective" index in the foreground.
assert.commandWorked(writeDB.runCommand({
createIndexes: collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: false}],
+ indexes: [{key: {x: 1}, name: "most_selective"}],
writeConcern: {w: "majority"}
}));
@@ -183,11 +177,10 @@ const secondaryDB = rst.getSecondary().getDB(dbName);
if (checkSBEEnabled(primaryDB)) {
jsTest.log("Skipping test because SBE is enabled");
rst.stopSet();
- return;
+ quit();
}
runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB});
runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB});
rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/plan_cache_invalidation.js b/jstests/noPassthrough/plan_cache_invalidation.js
index 327fc55d5cf09..d6dfc27be411f 100644
--- a/jstests/noPassthrough/plan_cache_invalidation.js
+++ b/jstests/noPassthrough/plan_cache_invalidation.js
@@ -3,7 +3,7 @@
* and clearing.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js
index 3e778a53e3a23..d5b3054c7a4e2 100644
--- a/jstests/noPassthrough/plan_cache_list_failed_plans.js
+++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js
@@ -1,8 +1,5 @@
// Confirms the $planCacheStats output format includes information about failed plans.
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
@@ -12,7 +9,7 @@ const coll = testDB.test;
if (checkSBEEnabled(testDB)) {
jsTest.log("Skipping test because SBE is enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
coll.drop();
@@ -50,5 +47,4 @@ const candidatePlanScores = planCacheEntry.candidatePlanScores;
assert.eq(candidatePlanScores.length, 2, planCacheEntry);
assert.eq(candidatePlanScores[1], 0, planCacheEntry);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/plan_cache_memory_debug_info.js b/jstests/noPassthrough/plan_cache_memory_debug_info.js
index a52d1f5aebee3..087c92c27343d 100644
--- a/jstests/noPassthrough/plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/plan_cache_memory_debug_info.js
@@ -3,12 +3,10 @@
* cumulative size of the system's plan caches exceeds a pre-configured threshold.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
/**
* Creates two indexes for the given collection. In order for plans to be cached, there need to be
@@ -86,7 +84,7 @@ const coll = db.plan_cache_memory_debug_info;
if (checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
coll.drop();
@@ -223,5 +221,4 @@ largeQueryCacheEntry = getPlanCacheEntryForFilter(coll, largeQuery);
assertCacheEntryHasDebugInfo(largeQueryCacheEntry);
assert.gt(largeQueryCacheEntry.estimatedSizeBytes, 10 * 1024, largeQueryCacheEntry);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/plan_cache_metrics.js b/jstests/noPassthrough/plan_cache_metrics.js
index 93565a6d13fca..bc9e93c5fb9d5 100644
--- a/jstests/noPassthrough/plan_cache_metrics.js
+++ b/jstests/noPassthrough/plan_cache_metrics.js
@@ -3,7 +3,7 @@
* and cleared from the cache.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
(function() {
diff --git a/jstests/noPassthrough/plan_cache_replan_group_lookup.js b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
index 941b9ba7d8d86..aede304556640 100644
--- a/jstests/noPassthrough/plan_cache_replan_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
@@ -4,14 +4,10 @@
* requires_profiling,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getAggPlanStages, getCachedPlan, getPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/log.js"); // For findMatchingLogLine.
load("jstests/libs/profiler.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages()'
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
@@ -644,4 +640,3 @@ if (sbeEnabled) {
}
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js
index 16c80ea346b2b..23df2cd776561 100644
--- a/jstests/noPassthrough/plan_cache_replan_sort.js
+++ b/jstests/noPassthrough/plan_cache_replan_sort.js
@@ -5,12 +5,9 @@
* requires_profiling,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getCachedPlan} from "jstests/libs/analyze_plan.js";
load("jstests/libs/profiler.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: false}});
const db = conn.getDB("test");
@@ -68,4 +65,3 @@ assert.eq(
profileObj);
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index 9c2777e1f04f4..1b2e4be51663c 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -2,14 +2,17 @@
* Tests for the $planCacheStats aggregation metadata source.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {
+ getAggPlanStage,
+ getCachedPlan,
+ getPlanCacheKeyFromShape,
+ getPlanStage,
+ getPlanStages,
+} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod failed to start up");
@@ -182,4 +185,3 @@ assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()}));
assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js b/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js
new file mode 100644
index 0000000000000..6b928a1b3f88e
--- /dev/null
+++ b/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js
@@ -0,0 +1,56 @@
+// Tests that the $planCacheStats will collect data from all nodes in a shard.
+//
+// @tags: [
+// assumes_read_concern_unchanged,
+// assumes_read_preference_unchanged,
+// # TODO SERVER-67607: Test plan cache with CQF enabled.
+// cqf_experimental_incompatible,
+// ]
+(function() {
+"use strict";
+
+load("jstests/sharding/libs/create_sharded_collection_util.js");
+
+for (let shardCount = 1; shardCount <= 2; shardCount++) {
+ const st = new ShardingTest({name: jsTestName(), shards: shardCount, rs: {nodes: 2}});
+
+ const db = st.s.getDB("test");
+ const coll = db.plan_cache_stats_all_servers;
+ coll.drop();
+ const planCache = coll.getPlanCache();
+
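+    // With a single shard, (1 % shardCount) is 0, so both chunks stay on shard0; with two shards
+    // the second chunk is placed on shard1.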
+ CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {a: 1}, [
+ {min: {a: MinKey}, max: {a: 5}, shard: st.shard0.shardName},
+ {min: {a: 5}, max: {a: MaxKey}, shard: st["shard" + (1 % shardCount).toString()].shardName},
+ ]);
+
+ assert.commandWorked(coll.createIndex({b: 1}));
+ assert.commandWorked(coll.createIndex({c: 1}));
+ assert.commandWorked(coll.insertOne({a: 1, b: 2, c: 3}));
+ assert.commandWorked(coll.insertOne({a: 11, b: 12, c: 13}));
+
+ planCache.clear();
+
+ // Send single shard request to primary node.
+ assert.eq(1, coll.find({a: 1, b: 2}).readPref("primary").itcount());
+ // Send multi shard request to secondary nodes.
+ assert.eq(1, coll.find({b: 12, c: 13}).readPref("secondary").itcount());
+
+    // On the primary there is only one plan in the plan cache, because the query was sent to a
+    // single shard.
+ db.getMongo().setReadPref("primary");
+ assert.eq(1, coll.aggregate({$planCacheStats: {}}).itcount());
+    // On the secondaries there is a plan for each shard.
+ db.getMongo().setReadPref("secondary");
+ assert.eq(shardCount, coll.aggregate({$planCacheStats: {}}).itcount());
+
+ // If we set allHosts: true, we return all plans despite any read preference setting.
+ const totalPlans = 1 + shardCount;
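+    // That is, one plan cached on the primary from the single-shard query plus one plan per shard
+    // cached on the secondaries from the multi-shard query.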
+ db.getMongo().setReadPref("primary");
+ assert.eq(totalPlans, coll.aggregate({$planCacheStats: {allHosts: true}}).itcount());
+ db.getMongo().setReadPref("secondary");
+ assert.eq(totalPlans, coll.aggregate({$planCacheStats: {allHosts: true}}).itcount());
+
+ st.stop();
+}
+}());
diff --git a/jstests/noPassthrough/point_in_time_lookups.js b/jstests/noPassthrough/point_in_time_lookups.js
index 5942f332f5f19..edbb7da6f1c04 100644
--- a/jstests/noPassthrough/point_in_time_lookups.js
+++ b/jstests/noPassthrough/point_in_time_lookups.js
@@ -4,7 +4,6 @@
* @tags: [
* requires_persistence,
* requires_replication,
- * featureFlagPointInTimeCatalogLookups,
* requires_fcv_70,
* ]
*/
diff --git a/jstests/noPassthrough/point_in_time_lookups_drop_pending.js b/jstests/noPassthrough/point_in_time_lookups_drop_pending.js
index d7ba210d818b7..11e413bac1b05 100644
--- a/jstests/noPassthrough/point_in_time_lookups_drop_pending.js
+++ b/jstests/noPassthrough/point_in_time_lookups_drop_pending.js
@@ -5,7 +5,6 @@
* @tags: [
* requires_persistence,
* requires_replication,
- * featureFlagPointInTimeCatalogLookups,
* requires_fcv_70,
* ]
*/
diff --git a/jstests/noPassthrough/preimages_can_be_inconsistent.js b/jstests/noPassthrough/preimages_can_be_inconsistent.js
new file mode 100644
index 0000000000000..f658c6940d9ed
--- /dev/null
+++ b/jstests/noPassthrough/preimages_can_be_inconsistent.js
@@ -0,0 +1,101 @@
+/**
+ * Test that consistency checks for preimage work as expected. Consistency is defined by performing
+ * these steps:
+ * * Fix an nsUUID to scan the preimage collection.
+ * * Obtain all preimage entries of the namespace by sorting in descending order of '_id.ts' and
+ *     '_id.applyOpsIndex'.
+ * * For each entry position and node, either:
+ *     * The entry exists in the node at that position and is equal across all nodes in that
+ *       position, or
+ *     * The entry doesn't exist in the node at that position.
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+
+(function() {
+"use strict";
+
+function getPreImage(collectionIndex, ts) {
+ const farOffDate = ISODate("2100-01-01");
+ const epochSeconds = farOffDate.valueOf() / 1000;
+    // Return a preimage document with a timestamp and operation time far off in the future.
+ return {
+ _id: {
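+            // The collection index is embedded as the final character of the UUID so that each
+            // index maps to a distinct namespace.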
+ nsUUID: UUID(`3b241101-e2bb-4255-8caf-4136c566a12${collectionIndex}`),
+ ts: new Timestamp(epochSeconds, ts),
+ applyOpsIndex: 0,
+ },
+ operationTime: farOffDate,
+ };
+}
+
+assert.doesNotThrow(() => {
+ const replSetTest = new ReplSetTest({name: "replSet", nodes: 2});
+ replSetTest.startSet();
+ replSetTest.initiate();
+
+ const primary = replSetTest.getPrimary();
+ const secondary = replSetTest.getSecondary();
+
+ const coll = primary.getDB("config")["system.preimages"];
+ const secondaryColl = secondary.getDB("config")["system.preimages"];
+
+ // Insert documents to the preimages collection. Ensure they are not replicated to secondaries.
+ coll.insert(getPreImage(1, 0));
+ coll.insert(getPreImage(1, 1));
+ coll.insert(getPreImage(2, 1));
+ coll.insert(getPreImage(3, 1));
+
+ assert.eq(coll.find({}).itcount(), 4);
+ assert.eq(secondaryColl.find({}).itcount(), 0);
+
+ // Now insert preimages in the old secondary.
+ replSetTest.stepUp(secondary);
+
+ const newPrimary = replSetTest.getPrimary();
+ const newColl = newPrimary.getDB("config")["system.preimages"];
+ newColl.insert(getPreImage(1, 1));
+ newColl.insert(getPreImage(2, 1));
+
+    // Verify that even though the data differs between the nodes the check passes, since
+    // consistency is defined as two nodes having entries that are equal or absent, starting from
+    // the end.
+ replSetTest.stopSet();
+});
+
+const replSetTest = new ReplSetTest({name: "replSet", nodes: 2});
+replSetTest.startSet();
+replSetTest.initiate();
+
+const primary = replSetTest.getPrimary();
+const secondary = replSetTest.getSecondary();
+
+const coll = primary.getDB("config")["system.preimages"];
+const secondaryColl = secondary.getDB("config")["system.preimages"];
+
+// Insert a document to the preimage collection. Ensure it is not replicated to secondaries.
+coll.insert(getPreImage(1, 0));
+assert.eq(coll.find({}).itcount(), 1);
+assert.eq(secondaryColl.find({}).itcount(), 0);
+
+// Now insert another document on the old secondary; this will cause an inconsistency error when we
+// stop the replica set.
+replSetTest.stepUp(secondary);
+
+const newPrimary = replSetTest.getPrimary();
+
+const newColl = newPrimary.getDB("config")["system.preimages"];
+newColl.insert(getPreImage(1, 1));
+
+// Verify that the two nodes are inconsistent.
+assert.throws(() => replSetTest.stopSet());
+
+try {
+ replSetTest.stopSet();
+} catch (e) {
+ // Verify that the inconsistency is the one we're looking for in preimages.
+ assert.eq(e.message.includes("Detected preimage entries that have different content"), true);
+}
+// Tear down the nodes now without checking for consistency.
+replSetTest.stopSet(undefined, undefined, {skipCheckDBHashes: true});
+})();
diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js
index 63f3ded21426a..727ea0a091139 100644
--- a/jstests/noPassthrough/profile_operation_metrics.js
+++ b/jstests/noPassthrough/profile_operation_metrics.js
@@ -8,16 +8,13 @@
* requires_wiredtiger,
* # TODO SERVER-71170: docBytesRead for read operations using cqf are reported are higher than
* # tests expect.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
-load("jstests/libs/fixture_helpers.js"); // For isReplSet().
-load("jstests/libs/os_helpers.js"); // For isLinux().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled().
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+load("jstests/libs/fixture_helpers.js"); // For isReplSet().
+load("jstests/libs/os_helpers.js"); // For isLinux().
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const dbName = jsTestName();
const collName = 'coll';
@@ -1193,20 +1190,24 @@ const operations = [
},
profileFilter: {op: 'command', 'command.aggregate': collName},
profileAssert: (db, profileDoc) => {
- // TODO SERVER-71684: We currently erroneously account for reads from and writes to
- // temporary record stores used as spill tables. This test accommodates the erroneous
- // behavior. Such accommodation is only necessary for debug builds, since we spill
- // artificially in debug builds in order to exercise the query execution engine's
- // spilling logic.
- //
- // The classic engine spills to files outside the storage engine rather than to a
- // temporary record store, so it is not subject to SERVER-71684.
- if (isDebugBuild(db) && checkSBEEnabled(db)) {
- // For $group, we incorporate the number of items spilled into "keysSorted" and the
- // number of individual spill events into "sorterSpills".
+ // In debug builds we spill artificially in order to exercise the query execution
+ // engine's spilling logic. For $group, we incorporate the number of items spilled into
+ // "keysSorted" and the number of individual spill events into "sorterSpills".
+ if (isDebugBuild(db)) {
assert.gt(profileDoc.keysSorted, 0);
assert.gt(profileDoc.sorterSpills, 0);
+ } else {
+ assert.eq(profileDoc.keysSorted, 0);
+ assert.eq(profileDoc.sorterSpills, 0);
+ }
+ // TODO SERVER-71684: We currently erroneously account for reads from and writes to
+ // temporary record stores used as spill tables. This test accommodates the erroneous
+ // behavior. Such accommodation is only necessary for debug builds (where we spill
+ // artificially for test purposes), and when SBE is used. The classic engine spills to
+ // files outside the storage engine rather than to a temporary record store, so it is
+ // not subject to SERVER-71684.
+ if (isDebugBuild(db) && checkSBEEnabled(db)) {
assert.gt(profileDoc.docBytesWritten, 0);
assert.gt(profileDoc.docUnitsWritten, 0);
assert.gt(profileDoc.totalUnitsWritten, 0);
@@ -1214,9 +1215,6 @@ const operations = [
assert.eq(profileDoc.docBytesRead, 29 * 100 + profileDoc.docBytesWritten);
assert.eq(profileDoc.docUnitsRead, 100 + profileDoc.docUnitsWritten);
} else {
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
-
assert.eq(profileDoc.docBytesRead, 29 * 100);
assert.eq(profileDoc.docUnitsRead, 100);
assert.eq(profileDoc.docBytesWritten, 0);
@@ -1243,20 +1241,8 @@ const operations = [
},
profileFilter: {op: 'command', 'command.aggregate': collName},
profileAssert: (db, profileDoc) => {
- if (isDebugBuild(db) && !checkSBEEnabled(db)) {
- // In debug builds, the classic engine does some special spilling for test purposes
- // when disk use is disabled. We spill for each of the first 20 documents, spilling
- // less often after we reach that limit. This 26 is the sum of 20 spills of
- // documents in groups 0 through 3 plus 6 additional items spilled for groups 4
- // through 10.
- assert.eq(profileDoc.keysSorted, 26);
- // This 21 is the sum of 20 debug spills plus 1 final debug spill.
- assert.eq(profileDoc.sorterSpills, 21);
- } else {
- assert.eq(profileDoc.keysSorted, 0);
- assert.eq(profileDoc.sorterSpills, 0);
- }
-
+ assert.eq(profileDoc.keysSorted, 0);
+ assert.eq(profileDoc.sorterSpills, 0);
assert.eq(profileDoc.docBytesRead, 29 * 100);
assert.eq(profileDoc.docUnitsRead, 100);
assert.eq(profileDoc.docBytesWritten, 0);
@@ -1550,16 +1536,9 @@ const operations = [
},
profileFilter: {op: 'insert', 'command.insert': 'ts', 'command.ordered': true},
profileAssert: (db, profileDoc) => {
- // Debug builds may perform extra reads of the _mdb_catalog when updating index entries.
- if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db) && isDebugBuild(db)) {
- assert.gte(profileDoc.docBytesRead, 216);
- assert.gte(profileDoc.docUnitsRead, 2);
- assert.gte(profileDoc.cursorSeeks, 2);
- } else {
- assert.eq(profileDoc.docBytesRead, 207);
- assert.eq(profileDoc.docUnitsRead, 2);
- assert.eq(profileDoc.cursorSeeks, 2);
- }
+ assert.eq(profileDoc.docBytesRead, 207);
+ assert.eq(profileDoc.docUnitsRead, 2);
+ assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.docBytesWritten, 233);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
@@ -1585,16 +1564,9 @@ const operations = [
},
profileFilter: {op: 'insert', 'command.insert': 'ts', 'command.ordered': false},
profileAssert: (db, profileDoc) => {
- // Debug builds may perform extra reads of the _mdb_catalog when updating index entries.
- if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db) && isDebugBuild(db)) {
- assert.gte(profileDoc.docBytesRead, 216);
- assert.gte(profileDoc.docUnitsRead, 2);
- assert.gte(profileDoc.cursorSeeks, 2);
- } else {
- assert.eq(profileDoc.docBytesRead, 207);
- assert.eq(profileDoc.docUnitsRead, 2);
- assert.eq(profileDoc.cursorSeeks, 2);
- }
+ assert.eq(profileDoc.docBytesRead, 207);
+ assert.eq(profileDoc.docUnitsRead, 2);
+ assert.eq(profileDoc.cursorSeeks, 2);
assert.eq(profileDoc.docBytesWritten, 233);
assert.eq(profileDoc.idxEntryBytesRead, 0);
assert.eq(profileDoc.idxEntryUnitsRead, 0);
@@ -1699,4 +1671,3 @@ jsTestLog("Testing replica set");
runTest(db);
rst.stopSet();
})();
-})();
diff --git a/jstests/noPassthrough/profile_query_planning_time_metric.js b/jstests/noPassthrough/profile_query_planning_time_metric.js
index d8748514015e3..09456028b4173 100644
--- a/jstests/noPassthrough/profile_query_planning_time_metric.js
+++ b/jstests/noPassthrough/profile_query_planning_time_metric.js
@@ -1,10 +1,6 @@
/**
* Tests that the query planning time is captured in the profiler.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
const conn = MongoRunner.runMongod();
@@ -66,5 +62,4 @@ verifyProfilerLog(commandProfilerFilter);
coll.findOne({});
verifyProfilerLog(findProfilerFilter);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js b/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js
new file mode 100644
index 0000000000000..f9f95c4c27f41
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js
@@ -0,0 +1,195 @@
+/**
+ * Test that $queryStats properly tokenizes aggregation commands, on mongod and mongos.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
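+// The constants below are the expected one-way tokens of the database, collection, and field names
+// used in this test when $queryStats is run with identifier transformation enabled.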
+const kHashedDbName = "iDlS7h5jf5HHxWPJpeHRbA+jLTNNZaqxVVkplrEkfko=";
+const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0=";
+const kHashedFieldA = "GDiF6ZEXkeo4kbKyKEAAViZ+2RHIVxBQV9S6b6Lu7gU=";
+const kHashedFieldB = "m1xtUkfSpZNxXjNZYKwo86vGD37Zxmd2gtt+TXDO558=";
+
+function verifyConsistentFields(key) {
+ assert.eq({"db": `${kHashedDbName}`, "coll": `${kHashedCollName}`}, key.queryShape.cmdNs);
+ assert.eq("aggregate", key.queryShape.command);
+ assert.eq({batchSize: "?number"}, key.cursor);
+ assert.eq(kShellApplicationName, key.client.application.name);
+}
+
+function runTest(conn) {
+ const db = conn.getDB("testDB");
+ const admin = conn.getDB("admin");
+
+ db.test.drop();
+ db.otherColl.drop();
+ assert.commandWorked(db.test.insert({a: "foobar", b: 15}));
+ assert.commandWorked(db.test.insert({a: "foobar", b: 20}));
+ assert.commandWorked(db.otherColl.insert({a: "foobar", price: 2.50}));
+
+ // First checks proper tokenization on a basic pipeline.
+ {
+ db.test
+ .aggregate([
+ {$sort: {a: -1}},
+ {$match: {a: {$regex: "foo(.*)"}, b: {$gt: 10}}},
+ {$skip: 5},
+ ])
+ .toArray();
+
+ const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true});
+
+ assert.eq(1, stats.length);
+ const key = stats[0].key;
+ verifyConsistentFields(key);
+ // Make sure there is no otherNss field when there are no secondary namespaces.
+ assert(!key.hasOwnProperty('otherNss'), key);
+ // Ensure the query stats key pipeline holds the raw input without optimization (e.g., the
+ // $sort stays before the $match, as in the raw query).
+ assert.eq(
+ [
+ {"$sort": {[kHashedFieldA]: -1}},
+ {
+ "$match": {
+ "$and": [
+ {[kHashedFieldA]: {"$regex": "?string"}},
+ {[kHashedFieldB]: {"$gt": "?number"}}
+ ]
+ }
+ },
+ {"$skip": "?number"}
+ ],
+ key.queryShape.pipeline,
+ key.queryShape.pipeline);
+ }
+
+ // Checks proper tokenization on another basic pipeline that is a subset of the original
+ // pipeline to make sure there are separate query stats entries per separate query shape.
+ {
+ db.test.aggregate([{$match: {a: {$regex: "foo(.*)"}, b: {$gt: 0}}}]).toArray();
+ const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true});
+
+ assert.eq(2, stats.length);
+ const key = stats[0].key;
+ verifyConsistentFields(key);
+ // Make sure there is no otherNss field when there are no secondary namespaces.
+ assert(!key.hasOwnProperty('otherNss'), key);
+ assert.eq([{
+ "$match": {
+ "$and": [
+ {[kHashedFieldA]: {"$regex": "?string"}},
+ {[kHashedFieldB]: {"$gt": "?number"}}
+ ]
+ }
+ }],
+ key.queryShape.pipeline,
+ key.queryShape.pipeline);
+ }
+ // Checks proper tokenization on a pipeline that involves a let variable and a $lookup stage
+ // that has its own subpipeline and references another namespace.
+ {
+ const kHashedOtherCollName = "8Rfz9QKu4P3BbyJ3Zpf5kxlUGx7gMvVk2PXZlJVfikE=";
+ const kHashedAsOutputName = "OsoJyz+7myXF2CkbE5dKd9DJ1gDAUw5uyt12k1ENQpY=";
+ const kHashedFieldOrderName = "KcpgS5iaiD5/3BKdQRG5rodz+aEE9FkcTPTYZ+G7cpA=";
+ const kHashedFieldPrice = "LiAftyHzrbrVhwtTPaiHd8Lu9gUILkWgcP682amX7lI=";
+ const kHashedFieldMaxPrice = "lFzklZZ6KbbYMBTi8KtTTp1GZCcPaUKUmOe3iko+IF8=";
+ const kHashedFieldRole = "SGZr91N1v3SFufKI5ww9WSZ4krOXKRpxpS+QshHwyUk=";
+
+ db.test.aggregate([{
+ $lookup: {
+ from: "otherColl",
+ let: { order_name: "$a", price: "$price"},
+ pipeline: [{
+ $match: {
+ $expr: {
+ $and: [
+ { $eq: ["$a", "$$order_name"] },
+ { $lte: ["$$price", "$$max_price"] }
+ ]
+ }
+ }
+ }],
+ as: "my_output"
+ }},
+ {
+ $match: {$expr: {$eq: ["$role", "$$USER_ROLES.role"]}}
+ }], {let: {max_price: 3.00}}).toArray();
+ const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true});
+
+ assert.eq(3, stats.length);
+ const key = stats[0].key;
+ verifyConsistentFields(key);
+ assert.eq(
+ [
+ {
+ "$lookup": {
+ "from": `${kHashedOtherCollName}`,
+ "as": `${kHashedAsOutputName}`,
+ "let": {
+ [kHashedFieldOrderName]: asFieldPath(kHashedFieldA),
+ [kHashedFieldPrice]: asFieldPath(kHashedFieldPrice)
+ },
+ "pipeline": [{
+ "$match": {
+ "$expr": {
+ "$and": [
+ {
+ "$eq": [
+ asFieldPath(kHashedFieldA),
+ asVarRef(kHashedFieldOrderName)
+ ],
+ },
+ {
+ "$lte": [
+ asVarRef(kHashedFieldPrice),
+ asVarRef(kHashedFieldMaxPrice)
+ ]
+ }
+ ]
+ }
+ }
+ }]
+ }
+ },
+ {
+ "$match": {
+ "$expr": {
+ "$eq": [
+ asFieldPath(kHashedFieldRole),
+ asVarRef("USER_ROLES." + kHashedFieldRole)
+ ]
+ }
+ }
+ }
+ ],
+ key.queryShape.pipeline,
+ key.queryShape.pipeline);
+ assert.eq({[kHashedFieldMaxPrice]: "?number"}, key.queryShape.let);
+ assert.eq([{"db": `${kHashedDbName}`, "coll": `${kHashedOtherCollName}`}], key.otherNss);
+ }
+}
+
+const conn = MongoRunner.runMongod({
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ }
+});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+ }
+ },
+});
+runTest(st.s);
+st.stop();
+}());
diff --git a/jstests/noPassthrough/queryStats/application_name_find.js b/jstests/noPassthrough/queryStats/application_name_find.js
new file mode 100644
index 0000000000000..29e887391a490
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/application_name_find.js
@@ -0,0 +1,39 @@
+/**
+ * Test that applicationName and namespace appear in queryStats for the find command.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
+const kApplicationName = "MongoDB Shell";
+const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0=";
+const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4=";
+
+// Turn on the collection of queryStats metrics.
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1},
+};
+
+const conn = MongoRunner.runMongod(options);
+conn.setLogLevel(3, "query");
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+coll.insert({v: 1});
+coll.insert({v: 2});
+coll.insert({v: 3});
+
+coll.find({v: 1}).toArray();
+
+let queryStats = getQueryStats(conn);
+assert.eq(1, queryStats.length, queryStats);
+assert.eq(kApplicationName, queryStats[0].key.client.application.name, queryStats);
+
+queryStats = getQueryStatsFindCmd(conn, {transformIdentifiers: true});
+assert.eq(1, queryStats.length, queryStats);
+assert.eq(kApplicationName, queryStats[0].key.client.application.name, queryStats);
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/clear_query_stats_store.js b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
new file mode 100644
index 0000000000000..4cdf67ebd99e3
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
@@ -0,0 +1,43 @@
+/**
+ * Test that the telemetry store can be cleared when the cache size is reset to 0.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js"); // For verifyMetrics.
+
+(function() {
+"use strict";
+
+// Turn on the collection of telemetry metrics.
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1, internalQueryStatsCacheSize: "10MB"},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+let query = {};
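+// Build 10 queries with distinct shapes (each iteration adds new field names) so that each
+// aggregate below creates its own entry in the query stats store.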
+for (var j = 0; j < 10; ++j) {
+ query["foo.field.xyz." + j] = 1;
+ query["bar.field.xyz." + j] = 2;
+ query["baz.field.xyz." + j] = 3;
+ coll.aggregate([{$match: query}]).itcount();
+}
+
+// Confirm number of entries in the store and that none have been evicted.
+let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
+assert.eq(telemetryResults.length, 10, telemetryResults);
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
+
+// Command to clear the cache.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "0MB"}));
+
+// 10 regular queries plus the $queryStats query mean 11 entries are evicted when the cache is
+// cleared.
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 11);
+
+// Calling $queryStats should fail when the telemetry store size is 0 bytes.
+assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]), 6579000);
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
new file mode 100644
index 0000000000000..811ae2039ed19
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
@@ -0,0 +1,120 @@
+/**
+ * Test the $queryStats hmac properties.
+ * @tags: [featureFlagQueryStats]
+ */
+
+load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains.
+load("jstests/libs/query_stats_utils.js");
+
+(function() {
+"use strict";
+
+// Assert the expected queryStats key with no hmac.
+function assertQueryStatsKeyWithoutHmac(queryStatsKey) {
+ assert.eq(queryStatsKey.filter, {"foo": {"$lte": "?number"}});
+ assert.eq(queryStatsKey.sort, {"bar": -1});
+ assert.eq(queryStatsKey.limit, "?number");
+}
+
+function runTest(conn) {
+ const testDB = conn.getDB('test');
+ var coll = testDB[jsTestName()];
+ coll.drop();
+
+ coll.insert({foo: 1});
+ coll.find({foo: {$lte: 2}}).sort({bar: -1}).limit(2).toArray();
+ // Default is no hmac.
+ assertQueryStatsKeyWithoutHmac(getQueryStatsFindCmd(conn)[0].key.queryShape);
+
+ // Turning on hmac should apply hmac to all field names on all entries, even previously cached
+ // ones.
+ const queryStatsKey = getQueryStatsFindCmd(conn, {transformIdentifiers: true})[0]["key"];
+ assert.eq(queryStatsKey.queryShape.filter,
+ {"fNWkKfogMv6MJ77LpBcuPrO7Nq+R+7TqtD+Lgu3Umc4=": {"$lte": "?number"}});
+ assert.eq(queryStatsKey.queryShape.sort, {"CDDQIXZmDehLKmQcRxtdOQjMqoNqfI2nGt2r4CgJ52o=": -1});
+ assert.eq(queryStatsKey.queryShape.limit, "?number");
+
+ // Turning hmac back off should preserve field names on all entries, even previously cached
+ // ones.
+ const queryStats = getQueryStats(conn)[1]["key"];
+ assertQueryStatsKeyWithoutHmac(queryStats.queryShape);
+
+ // Explicitly set transformIdentifiers to false.
+ assertQueryStatsKeyWithoutHmac(
+ getQueryStatsFindCmd(conn, {transformIdentifiers: false})[0]["key"].queryShape);
+
+ // Wrong parameter name throws error.
+ let pipeline = [{$queryStats: {redactFields: true}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll, pipeline, 40415, "BSON field '$queryStats.redactFields' is an unknown field.");
+
+ // Wrong parameter name throws error.
+ pipeline = [{$queryStats: {algorithm: "hmac-sha-256"}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll, pipeline, 40415, "BSON field '$queryStats.algorithm' is an unknown field.");
+
+ // Wrong parameter type throws error.
+ pipeline = [{$queryStats: {transformIdentifiers: {algorithm: 1}}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.TypeMismatch,
+ "BSON field '$queryStats.transformIdentifiers.algorithm' is the wrong type 'double', expected type 'string'");
+
+ pipeline = [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-256", hmacKey: 1}}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.TypeMismatch,
+ "BSON field '$queryStats.transformIdentifiers.hmacKey' is the wrong type 'double', expected type 'binData'");
+
+ // Unsupported algorithm throws error.
+ pipeline = [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-1"}}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll,
+ pipeline,
+ ErrorCodes.BadValue,
+ "Enumeration value 'hmac-sha-1' for field '$queryStats.transformIdentifiers.algorithm' is not a valid value.");
+
+ // TransformIdentifiers with missing algorithm throws error.
+ pipeline = [{$queryStats: {transformIdentifiers: {}}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll,
+ pipeline,
+ 40414,
+ "BSON field '$queryStats.transformIdentifiers.algorithm' is missing but a required field");
+
+ // Parameter object with unrecognized key throws error.
+ pipeline =
+ [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-256", hmacStrategy: "on"}}}];
+ assertAdminDBErrCodeAndErrMsgContains(
+ coll,
+ pipeline,
+ 40415,
+ "BSON field '$queryStats.transformIdentifiers.hmacStrategy' is an unknown field.");
+}
+
+const conn = MongoRunner.runMongod({
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ featureFlagQueryStats: true,
+ }
+});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+ }
+ },
+});
+runTest(st.s);
+st.stop();
+}());
diff --git a/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
new file mode 100644
index 0000000000000..8eed4aa836cb8
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
@@ -0,0 +1,49 @@
+/**
+ * Test that calls to read from the telemetry store fail when the feature flag is turned off and
+ * the sampling rate is > 0.
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+// Set the sampling rate to -1 (no rate limiting).
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1},
+};
+const conn = MongoRunner.runMongod(options);
+const testdb = conn.getDB('test');
+
+// This test specifically tests error handling when the feature flag is not on.
+// TODO SERVER-65800 This test can be deleted when the feature is on by default.
+if (!conn || FeatureFlagUtil.isEnabled(testdb, "QueryStats")) {
+    jsTestLog(`Skipping test since mongod failed to start or the feature flag is enabled. conn: ${conn}`);
+ if (conn) {
+ MongoRunner.stopMongod(conn);
+ }
+ quit();
+}
+
+var coll = testdb[jsTestName()];
+coll.drop();
+
+// Bulk insert documents to reduce roundtrips and make a timeout on a slow machine less likely.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 1; i <= 20; i++) {
+ bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+}
+assert.commandWorked(bulk.execute());
+
+// A pipeline that reads the telemetry store should fail without the feature flag turned on, even
+// though the sampling rate is > 0.
+assert.commandFailedWithCode(
+ testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+// A pipeline with a filter that reads the telemetry store also fails without the feature flag
+// turned on, even though the sampling rate is > 0.
+assert.commandFailedWithCode(testdb.adminCommand({
+ aggregate: 1,
+ pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ cursor: {}
+}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js b/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js
new file mode 100644
index 0000000000000..f304eca8c228d
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js
@@ -0,0 +1,69 @@
+/**
+ * Test that $queryStats properly tokenizes find commands, on mongod and mongos.
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
+const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0=";
+const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4=";
+
+function runTest(conn) {
+ const db = conn.getDB("test");
+ const admin = conn.getDB("admin");
+
+ db.test.drop();
+ db.test.insert({v: 1});
+
+ db.test.find({v: 1}).toArray();
+
+ let queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true});
+
+ assert.eq(1, queryStats.length);
+ assert.eq("find", queryStats[0].key.queryShape.command);
+ assert.eq({[kHashedFieldName]: {$eq: "?number"}}, queryStats[0].key.queryShape.filter);
+
+ db.test.insert({v: 2});
+
+ const cursor = db.test.find({v: {$gt: 0, $lt: 3}}).batchSize(1);
+ queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true});
+ // Cursor isn't exhausted, so there shouldn't be another entry yet.
+ assert.eq(1, queryStats.length);
+
+ assert.commandWorked(
+ db.runCommand({getMore: cursor.getId(), collection: db.test.getName(), batchSize: 2}));
+
+ queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true});
+ assert.eq(2, queryStats.length);
+ assert.eq("find", queryStats[1].key.queryShape.command);
+ assert.eq({
+ "$and": [{[kHashedFieldName]: {"$gt": "?number"}}, {[kHashedFieldName]: {"$lt": "?number"}}]
+ },
+ queryStats[1].key.queryShape.filter);
+}
+
+const conn = MongoRunner.runMongod({
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ featureFlagQueryStats: true,
+ }
+});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ featureFlagQueryStats: true,
+ 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+ }
+ },
+});
+runTest(st.s);
+st.stop();
+}());
diff --git a/jstests/noPassthrough/queryStats/geometry_without_coordinates.js b/jstests/noPassthrough/queryStats/geometry_without_coordinates.js
new file mode 100644
index 0000000000000..98ba6ec40aa07
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/geometry_without_coordinates.js
@@ -0,0 +1,22 @@
+// This test was designed to reproduce SERVER-77430: a parser contained a mistaken assertion, and we
+// are interested in proving that it will not fail here.
+// @tags: [featureFlagQueryStats]
+(function() {
+"use strict";
+
+const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ }
+ },
+});
+const coll = st.s.getDB("test").geometry_without_coordinates;
+// This is a query that once mistakenly threw an error.
+assert.doesNotThrow(() => coll.find({geo: {$geoIntersects: {$geometry: {x: 40, y: 5}}}}).itcount());
+st.stop();
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js
new file mode 100644
index 0000000000000..78297e416daf6
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js
@@ -0,0 +1,308 @@
+/**
+ * Test that mongos is collecting query stats metrics.
+ * @tags: [featureFlagQueryStats]
+ */
+
+load('jstests/libs/query_stats_utils.js');
+
+(function() {
+"use strict";
+
+const setup = () => {
+ const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+ }
+ },
+ });
+ const mongos = st.s;
+ const db = mongos.getDB("test");
+ const coll = db.coll;
+ coll.insert({v: 1});
+ coll.insert({v: 4});
+ return st;
+};
+
+const assertExpectedResults = (results,
+ expectedQueryStatsKey,
+ expectedExecCount,
+ expectedDocsReturnedSum,
+ expectedDocsReturnedMax,
+ expectedDocsReturnedMin,
+ expectedDocsReturnedSumOfSq,
+ getMores) => {
+ const {key, metrics} = results;
+ confirmAllExpectedFieldsPresent(expectedQueryStatsKey, key);
+ assert.eq(expectedExecCount, metrics.execCount);
+ assert.docEq({
+ sum: NumberLong(expectedDocsReturnedSum),
+ max: NumberLong(expectedDocsReturnedMax),
+ min: NumberLong(expectedDocsReturnedMin),
+ sumOfSquares: NumberLong(expectedDocsReturnedSumOfSq)
+ },
+ metrics.docsReturned);
+
+ const {
+ firstSeenTimestamp,
+ latestSeenTimestamp,
+ lastExecutionMicros,
+ totalExecMicros,
+ firstResponseExecMicros
+ } = metrics;
+
+ // This test can't predict exact timings, so just assert these three fields have been set (are
+ // non-zero).
+ assert.neq(lastExecutionMicros, NumberLong(0));
+ assert.neq(firstSeenTimestamp.getTime(), 0);
+ assert.neq(latestSeenTimestamp.getTime(), 0);
+
+ const distributionFields = ['sum', 'max', 'min', 'sumOfSquares'];
+ for (const field of distributionFields) {
+ assert.neq(totalExecMicros[field], NumberLong(0));
+ assert.neq(firstResponseExecMicros[field], NumberLong(0));
+ if (getMores) {
+ // If there are getMore calls, totalExecMicros fields should be greater than or equal to
+ // firstResponseExecMicros.
+ if (field == 'min' || field == 'max') {
+ // In the case that we've executed multiple queries with the same shape, it is
+ // possible for the min or max to be equal.
+ assert.gte(totalExecMicros[field], firstResponseExecMicros[field]);
+ } else {
+ assert.gt(totalExecMicros[field], firstResponseExecMicros[field]);
+ }
+ } else {
+ // If there are no getMore calls, totalExecMicros fields should be equal to
+ // firstResponseExecMicros.
+ assert.eq(totalExecMicros[field], firstResponseExecMicros[field]);
+ }
+ }
+};
+
+// Assert that, for find queries, no query stats results are written until a cursor has reached
+// exhaustion; ensure accurate results once they're written.
+{
+ const st = setup();
+ const db = st.s.getDB("test");
+ const collName = "coll";
+ const coll = db[collName];
+
+ const queryStatsKey = {
+ queryShape: {
+ cmdNs: {db: "test", coll: "coll"},
+ command: "find",
+ filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]},
+ },
+ readConcern: {level: "local", provenance: "implicitDefault"},
+ batchSize: "?number",
+ client: {application: {name: "MongoDB Shell"}}
+ };
+
+ const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc
+
+ // Since the cursor hasn't been exhausted yet, ensure no query stats results have been written
+ // yet.
+ let queryStats = getQueryStats(db);
+ assert.eq(0, queryStats.length, queryStats);
+
+ // Run a getMore to exhaust the cursor, then ensure query stats results have been written
+ // accurately. batchSize must be 2 so the cursor recognizes exhaustion.
+ assert.commandWorked(db.runCommand({
+ getMore: cursor.getId(),
+ collection: coll.getName(),
+ batchSize: 2
+ })); // returns 1 doc, exhausts the cursor
+ queryStats = getQueryStatsFindCmd(db);
+ assert.eq(1, queryStats.length, queryStats);
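+    // The single execution returned 2 documents in total (one from the initial batch and one from
+    // the getMore), so sum = max = min = 2 and sumOfSquares = 2^2 = 4.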
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 1,
+ /* expectedDocsReturnedSum */ 2,
+ /* expectedDocsReturnedMax */ 2,
+ /* expectedDocsReturnedMin */ 2,
+ /* expectedDocsReturnedSumOfSq */ 4,
+ /* getMores */ true);
+
+ // Run more queries (to exhaustion) with the same query shape, and ensure query stats results
+ // are accurate.
+ coll.find({v: {$gt: 2, $lt: 3}}).batchSize(10).toArray(); // returns 0 docs
+ coll.find({v: {$gt: 0, $lt: 1}}).batchSize(10).toArray(); // returns 0 docs
+    coll.find({v: {$gt: 0, $lt: 2}}).batchSize(10).toArray();   // returns 1 doc
+ queryStats = getQueryStatsFindCmd(db);
+ assert.eq(1, queryStats.length, queryStats);
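+    // The four executions returned 2, 0, 0, and 1 documents respectively, so sum = 3, max = 2,
+    // min = 0, and sumOfSquares = 2^2 + 0^2 + 0^2 + 1^2 = 5.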
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 4,
+ /* expectedDocsReturnedSum */ 3,
+ /* expectedDocsReturnedMax */ 2,
+ /* expectedDocsReturnedMin */ 0,
+ /* expectedDocsReturnedSumOfSq */ 5,
+ /* getMores */ true);
+
+ st.stop();
+}
+
+// Assert that, for agg queries, no query stats results are written until a cursor has reached
+// exhaustion; ensure accurate results once they're written.
+{
+ const st = setup();
+ const db = st.s.getDB("test");
+ const coll = db.coll;
+
+ const queryStatsKey = {
+ queryShape: {
+ cmdNs: {db: "test", coll: "coll"},
+ command: "aggregate",
+ pipeline: [
+ {$match: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}},
+ {$project: {_id: true, hello: true}}
+ ]
+
+ },
+ cursor: {batchSize: "?number"},
+ applicationName: "MongoDB Shell",
+ };
+
+ const cursor = coll.aggregate(
+ [
+ {$match: {v: {$gt: 0, $lt: 5}}},
+ {$project: {hello: true}},
+ ],
+ {cursor: {batchSize: 1}}); // returns 1 doc
+
+ // Since the cursor hasn't been exhausted yet, ensure no query stats results have been written
+ // yet.
+ let queryStats = getQueryStats(db);
+ assert.eq(0, queryStats.length, queryStats);
+
+ // Run a getMore to exhaust the cursor, then ensure query stats results have been written
+ // accurately. batchSize must be 2 so the cursor recognizes exhaustion.
+ assert.commandWorked(db.runCommand({
+ getMore: cursor.getId(),
+ collection: coll.getName(),
+ batchSize: 2
+ })); // returns 1 doc, exhausts the cursor
+ queryStats = getQueryStatsAggCmd(db);
+ assert.eq(1, queryStats.length, queryStats);
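+    // The single execution returned 2 documents in total (one from the initial batch and one from
+    // the getMore), so sum = max = min = 2 and sumOfSquares = 2^2 = 4.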
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 1,
+ /* expectedDocsReturnedSum */ 2,
+ /* expectedDocsReturnedMax */ 2,
+ /* expectedDocsReturnedMin */ 2,
+ /* expectedDocsReturnedSumOfSq */ 4,
+ /* getMores */ true);
+
+ // Run more queries (to exhaustion) with the same query shape, and ensure query stats results
+ // are accurate.
+ coll.aggregate([
+ {$match: {v: {$gt: 0, $lt: 5}}},
+ {$project: {hello: true}},
+ ]); // returns 2 docs
+ coll.aggregate([
+ {$match: {v: {$gt: 2, $lt: 3}}},
+ {$project: {hello: true}},
+ ]); // returns 0 docs
+ coll.aggregate([
+ {$match: {v: {$gt: 0, $lt: 2}}},
+ {$project: {hello: true}},
+ ]); // returns 1 doc
+ queryStats = getQueryStatsAggCmd(db);
+ assert.eq(1, queryStats.length, queryStats);
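+    // The four executions returned 2, 2, 0, and 1 documents respectively, so sum = 5, max = 2,
+    // min = 0, and sumOfSquares = 2^2 + 2^2 + 0^2 + 1^2 = 9.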
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 4,
+ /* expectedDocsReturnedSum */ 5,
+ /* expectedDocsReturnedMax */ 2,
+ /* expectedDocsReturnedMin */ 0,
+ /* expectedDocsReturnedSumOfSq */ 9,
+ /* getMores */ true);
+
+ st.stop();
+}
+
+// Assert on batchSize-limited find queries that killCursors will write metrics with partial results
+// to the query stats store.
+{
+ const st = setup();
+ const db = st.s.getDB("test");
+ const collName = "coll";
+ const coll = db[collName];
+
+ const queryStatsKey = {
+ queryShape: {
+ cmdNs: {db: "test", coll: "coll"},
+ command: "find",
+ filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]},
+ },
+ readConcern: {level: "local", provenance: "implicitDefault"},
+ batchSize: "?number",
+ client: {application: {name: "MongoDB Shell"}}
+ };
+
+ const cursor1 = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc
+ const cursor2 = coll.find({v: {$gt: 0, $lt: 2}}).batchSize(1); // returns 1 doc
+
+ assert.commandWorked(
+ db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
+ const queryStats = getQueryStats(db);
+ assert.eq(1, queryStats.length);
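+    // Each killed cursor had returned only its initial batch of one document, so sum = 2,
+    // max = min = 1, and sumOfSquares = 1^2 + 1^2 = 2.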
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 2,
+ /* expectedDocsReturnedSum */ 2,
+ /* expectedDocsReturnedMax */ 1,
+ /* expectedDocsReturnedMin */ 1,
+ /* expectedDocsReturnedSumOfSq */ 2,
+ /* getMores */ false);
+ st.stop();
+}
+
+// Assert on batchSize-limited agg queries that killCursors will write metrics with partial results
+// to the query stats store.
+{
+ const st = setup();
+ const db = st.s.getDB("test");
+ const coll = db.coll;
+
+ const queryStatsKey = {
+ queryShape: {
+ cmdNs: {db: "test", coll: "coll"},
+ command: "aggregate",
+ pipeline: [{$match: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}}]
+ },
+ cursor: {batchSize: "?number"},
+ applicationName: "MongoDB Shell",
+ };
+
+ const cursor1 = coll.aggregate(
+ [
+ {$match: {v: {$gt: 0, $lt: 5}}},
+ ],
+ {cursor: {batchSize: 1}}); // returns 1 doc
+ const cursor2 = coll.aggregate(
+ [
+ {$match: {v: {$gt: 0, $lt: 2}}},
+ ],
+ {cursor: {batchSize: 1}}); // returns 1 doc
+
+ assert.commandWorked(
+ db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
+ const queryStats = getQueryStats(db);
+ assert.eq(1, queryStats.length);
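+    // As in the find case above, each cursor returned a single document before being killed,
+    // giving sum = 2, max = min = 1, and sumOfSquares = 2.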
+ assertExpectedResults(queryStats[0],
+ queryStatsKey,
+ /* expectedExecCount */ 2,
+ /* expectedDocsReturnedSum */ 2,
+ /* expectedDocsReturnedMax */ 1,
+ /* expectedDocsReturnedMin */ 1,
+ /* expectedDocsReturnedSumOfSq */ 2,
+ /* getMores */ false);
+ st.stop();
+}
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_collectionType.js b/jstests/noPassthrough/queryStats/query_stats_collectionType.js
new file mode 100644
index 0000000000000..f86e6d11022b6
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_collectionType.js
@@ -0,0 +1,111 @@
+/**
+ * Test that collectionType is returned properly in $queryStats.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
+function runTest(conn) {
+ const testDB = conn.getDB('test');
+
+ // We create one collection for each corresponding type reported by query stats.
+ assert.commandWorked(testDB.createCollection(jsTestName() + "_collection"));
+ assert.commandWorked(testDB.createView(
+ jsTestName() + "_view", jsTestName() + "_collection", [{$match: {v: {$gt: 42}}}]));
+ assert.commandWorked(
+ testDB.createCollection(jsTestName() + "_timeseries", {timeseries: {timeField: "time"}}));
+
+ // Next we run queries over each of the collection types to generate query stats.
+
+ // Base _collection has a few simple documents.
+ var coll = testDB[jsTestName() + "_collection"];
+ coll.insert({v: 1});
+ coll.insert({v: 2});
+ coll.insert({v: 3});
+ coll.find({v: 3}).toArray();
+ coll.aggregate([]).toArray();
+
+ // View _view is over _collection.
+ coll = testDB[jsTestName() + "_view"];
+ coll.find({v: 5}).toArray();
+ coll.aggregate([{$match: {v: {$lt: 99}}}]).toArray();
+
+ // Timeseries collection _timeseries.
+ coll = testDB[jsTestName() + "_timeseries"];
+ coll.insert({v: 1, time: ISODate("2021-05-18T00:00:00.000Z")});
+ coll.insert({v: 2, time: ISODate("2021-05-18T01:00:00.000Z")});
+ coll.insert({v: 3, time: ISODate("2021-05-18T02:00:00.000Z")});
+ coll.find({v: 6}).toArray();
+ coll.aggregate().toArray();
+ // QueryStats should still be collected for queries run on nonexistent collections.
+ assert.commandWorked(testDB.runCommand({find: jsTestName() + "_nonExistent", filter: {v: 6}}));
+ assert.commandWorked(
+ testDB.runCommand({aggregate: jsTestName() + "_nonExistent", pipeline: [], cursor: {}}));
+
+ // Verify that we have two telemetry entries for the collection type. This assumes we have
+ // executed one find and one agg query for the given collection type.
+ function verifyTelemetryForCollectionType(collectionType) {
+ const telemetry = getQueryStats(conn, {
+ extraMatch: {
+ "key.collectionType": collectionType,
+ "key.queryShape.cmdNs.coll": jsTestName() + "_" + collectionType
+ }
+ });
+        // We should see one entry for find() and one for aggregate() for each collection type.
+        // The two entries account for the fact that find() queries over views are rewritten to
+        // aggregate(); i.e., the query shapes are different because the queries are different.
+ assert.eq(2, telemetry.length, "Expected result for collection type " + collectionType);
+ }
+
+ verifyTelemetryForCollectionType("collection");
+ verifyTelemetryForCollectionType("view");
+ verifyTelemetryForCollectionType("timeseries");
+ verifyTelemetryForCollectionType("nonExistent");
+
+ // Verify that, for views, we capture the original query before it's rewritten. The view would
+ // include a $gt predicate on 'v'.
+ const findOnViewShape =
+ getQueryStats(
+ conn, {extraMatch: {"key.collectionType": "view", "key.queryShape.command": "find"}})[0]
+ .key.queryShape;
+ assert.eq(findOnViewShape.filter, {"v": {"$eq": "?number"}});
+
+ const aggOnViewShape =
+ getQueryStats(
+ conn,
+ {extraMatch: {"key.collectionType": "view", "key.queryShape.command": "aggregate"}})[0]
+ .key.queryShape;
+ assert.eq(aggOnViewShape.pipeline, [{"$match": {"v": {"$lt": "?number"}}}]);
+}
+
+const conn = MongoRunner.runMongod({
+ setParameter: {
+ internalQueryStatsRateLimit: -1,
+ featureFlagQueryStats: true,
+ }
+});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+// TODO Implement this in SERVER-76263.
+if (false) {
+ const st = new ShardingTest({
+ mongos: 1,
+ shards: 1,
+ config: 1,
+ rs: {nodes: 1},
+ mongosOptions: {
+ setParameter: {
+ internalQueryStatsSamplingRate: -1,
+ featureFlagQueryStats: true,
+ 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+ }
+ },
+ });
+ runTest(st.s);
+ st.stop();
+}
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_feature_flag.js b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js
new file mode 100644
index 0000000000000..59043687ea49e
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js
@@ -0,0 +1,29 @@
+/**
+ * Test that calls to read from the telemetry store fail when the feature flag is turned off.
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+// This test specifically tests error handling when the feature flag is not on.
+// TODO SERVER-65800 this test can be removed when the feature flag is removed.
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB('test');
+if (FeatureFlagUtil.isEnabled(testDB, "QueryStats")) {
+ jsTestLog("Skipping test since query stats are enabled.");
+ MongoRunner.stopMongod(conn);
+ quit();
+}
+
+// A pipeline that reads the telemetry store should fail without the feature flag turned on.
+assert.commandFailedWithCode(
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+// A pipeline with a filter that reads the telemetry store also fails without the feature flag turned on.
+assert.commandFailedWithCode(testDB.adminCommand({
+ aggregate: 1,
+ pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+ cursor: {}
+}),
+ ErrorCodes.QueryFeatureNotAllowed);
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/query_stats_key.js b/jstests/noPassthrough/queryStats/query_stats_key.js
new file mode 100644
index 0000000000000..f4905e00e0b67
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_key.js
@@ -0,0 +1,141 @@
+/**
+ * This test confirms that telemetry store key fields are properly nested and none are missing.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
+function confirmAllMetaFieldsPresent(clientSubObj) {
+ const kApplicationName = "MongoDB Shell";
+ assert.eq(clientSubObj.application.name, kApplicationName);
+
+ {
+ assert(clientSubObj.hasOwnProperty('driver'), clientSubObj);
+ assert(clientSubObj.driver.hasOwnProperty("name"), clientSubObj);
+ assert(clientSubObj.driver.hasOwnProperty("version"), clientSubObj);
+ }
+
+ {
+ assert(clientSubObj.hasOwnProperty('os'), clientSubObj);
+ assert(clientSubObj.os.hasOwnProperty("type"), clientSubObj);
+ assert(clientSubObj.os.hasOwnProperty("name"), clientSubObj);
+ assert(clientSubObj.os.hasOwnProperty("architecture"), clientSubObj);
+ assert(clientSubObj.os.hasOwnProperty("version"), clientSubObj);
+ }
+}
+
+function confirmAllFieldsPresent(queryStatsEntries) {
+ const queryShapeFindFields = [
+ "cmdNs",
+ "command",
+ "filter",
+ "sort",
+ "projection",
+ "hint",
+ "skip",
+ "limit",
+ "singleBatch",
+ "max",
+ "min",
+ "returnKey",
+ "showRecordId",
+ "tailable",
+ "oplogReplay",
+ "awaitData",
+ "collation",
+ "allowDiskUse",
+ "let"
+ ];
+
+ // The outer fields not nested inside queryShape.
+ const queryStatsKeyFields = [
+ "queryShape",
+ "batchSize",
+ "comment",
+ "maxTimeMS",
+ "noCursorTimeout",
+ "readConcern",
+ "allowPartialResults",
+ "apiDeprecationErrors",
+ "apiVersion",
+ "apiStrict",
+ "collectionType",
+ "client"
+ ];
+
+ for (const entry of queryStatsEntries) {
+ let fieldCounter = 0;
+ assert.eq(entry.key.queryShape.command, "find");
+ confirmAllMetaFieldsPresent(entry.key.client);
+
+ for (const field in entry.key.queryShape) {
+ assert(queryShapeFindFields.includes(field));
+ fieldCounter++;
+ }
+ assert.eq(fieldCounter, queryShapeFindFields.length);
+
+ fieldCounter = 0;
+ for (const field in entry.key) {
+ assert(queryStatsKeyFields.includes(field));
+ fieldCounter++;
+ }
+ assert.eq(fieldCounter, queryStatsKeyFields.length, entry.key);
+ }
+}
+
+// Turn on the collection of telemetry metrics.
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+// Create an index so that the hinted query below does not fail.
+assert.commandWorked(coll.createIndex({v: 1}));
+
+let commandObj = {
+ find: coll.getName(),
+ filter: {v: {$eq: 2}},
+ oplogReplay: true,
+ comment: "this is a test!!",
+ min: {"v": 0},
+ max: {"v": 4},
+ hint: {"v": 1},
+ sort: {a: -1},
+ returnKey: false,
+ noCursorTimeout: true,
+ showRecordId: false,
+ tailable: false,
+ awaitData: false,
+ allowPartialResults: true,
+ skip: 1,
+ limit: 2,
+ maxTimeMS: 500,
+ collation: {locale: "en_US", strength: 2},
+ allowDiskUse: true,
+ readConcern: {level: "local"},
+ batchSize: 2,
+ singleBatch: true,
+ let : {},
+ projection: {_id: 0},
+ apiDeprecationErrors: false,
+ apiVersion: "1",
+ apiStrict: false,
+};
+
+assert.commandWorked(testDB.runCommand(commandObj));
+let telemetry = getQueryStats(conn);
+assert.eq(1, telemetry.length);
+confirmAllFieldsPresent(telemetry);
+
+// $hint can only be a string (index name) or an object (index spec).
+assert.throwsWithCode(() => {
+ coll.find({v: {$eq: 2}}).hint({'v': 60, $hint: -128}).itcount();
+}, ErrorCodes.FailedToParse);
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js
new file mode 100644
index 0000000000000..aa6a679d5d94a
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js
@@ -0,0 +1,174 @@
+/**
+ * Test that the queryStats metrics are aggregated properly by distinct query shape over getMore
+ * calls.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js"); // For verifyMetrics and getQueryStatsAggCmd.
+
+(function() {
+"use strict";
+
+// Turn on the collection of queryStats metrics.
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+// Bulk insert documents to reduce roundtrips and make a timeout on a slow machine less likely.
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 100;
+for (let i = 0; i < numDocs / 2; ++i) {
+ bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+ bulk.insert({foo: 1, bar: Math.floor(Math.random() * -2)});
+}
+assert.commandWorked(bulk.execute());
+// Assert that two queries with identical structures are represented by the same key.
+{
+ // Note: toArray() is necessary for the batchSize-limited query to run to cursor exhaustion
+ // (when it writes to the queryStats store).
+ coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray();
+ coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray();
+
+    // This command will return all queryStats store entries.
+ const queryStatsResults = getQueryStatsAggCmd(testDB);
+ // Assert there is only one entry.
+ assert.eq(queryStatsResults.length, 1, queryStatsResults);
+ const queryStatsEntry = queryStatsResults[0];
+ jsTestLog(queryStatsEntry);
+ assert.eq(queryStatsEntry.key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsEntry.key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsEntry.key.client.application.name, "MongoDB Shell");
+
+ // Assert we update execution count for identically shaped queries.
+ assert.eq(queryStatsEntry.metrics.execCount, 2);
+
+ // Assert queryStats values are accurate for the two above queries.
+ assert.eq(queryStatsEntry.metrics.docsReturned.sum, numDocs);
+ assert.eq(queryStatsEntry.metrics.docsReturned.min, numDocs / 2);
+ assert.eq(queryStatsEntry.metrics.docsReturned.max, numDocs / 2);
+
+ verifyMetrics(queryStatsResults);
+}
+
+const fooEqBatchSize = 5;
+const fooNeBatchSize = 3;
+// Assert on batchSize-limited queries that killCursors will write metrics with partial results to
+// the queryStats store.
+{
+ let cursor1 = coll.find({foo: {$eq: 0}}).batchSize(fooEqBatchSize);
+ let cursor2 = coll.find({foo: {$ne: 0}}).batchSize(fooNeBatchSize);
+ // Issue one getMore for the first query, so 2 * fooEqBatchSize documents are returned total.
+ assert.commandWorked(testDB.runCommand(
+ {getMore: cursor1.getId(), collection: coll.getName(), batchSize: fooEqBatchSize}));
+
+ // Kill both cursors so the queryStats metrics are stored.
+ assert.commandWorked(testDB.runCommand(
+ {killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
+
+ // This filters queryStats entries to just the ones entered when running the above find queries.
+ const queryStatsResults = testDB.getSiblingDB("admin")
+ .aggregate([
+ {$queryStats: {}},
+ {$match: {"key.queryShape.filter.foo": {$exists: true}}},
+ {$sort: {key: 1}},
+ ])
+ .toArray();
+ assert.eq(queryStatsResults.length, 2, queryStatsResults);
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell");
+ assert.eq(queryStatsResults[1].key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsResults[1].key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsResults[1].key.client.application.name, "MongoDB Shell");
+
+ assert.eq(queryStatsResults[0].metrics.execCount, 1);
+ assert.eq(queryStatsResults[1].metrics.execCount, 1);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.sum, fooEqBatchSize * 2);
+ assert.eq(queryStatsResults[1].metrics.docsReturned.sum, fooNeBatchSize);
+
+ verifyMetrics(queryStatsResults);
+
+ const distributionFields = ['sum', 'max', 'min', 'sumOfSquares'];
+ for (const field of distributionFields) {
+ // If there were getMore calls, totalExecMicros should be strictly greater than
+ // firstResponseExecMicros.
+ assert.gt(queryStatsResults[0].metrics.totalExecMicros[field],
+ queryStatsResults[0].metrics.firstResponseExecMicros[field]);
+
+ // If there were no getMore calls, firstResponseExecMicros and totalExecMicros should be
+ // equal.
+ assert.eq(queryStatsResults[1].metrics.totalExecMicros[field],
+ queryStatsResults[1].metrics.firstResponseExecMicros[field]);
+ }
+}
+
+// Assert that options such as limit/sort create different keys, and that repeating a query shape
+// ({foo: {$eq}}) aggregates metrics across executions.
+{
+ const query2Limit = 50;
+ coll.find({foo: {$eq: 0}}).batchSize(2).toArray();
+ coll.find({foo: {$eq: 1}}).limit(query2Limit).batchSize(2).toArray();
+ coll.find().sort({"foo": 1}).batchSize(2).toArray();
+ // This filters queryStats entries to just the ones entered when running the above find queries.
+ let queryStatsResults =
+ testDB.getSiblingDB("admin")
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.command": "find"}}])
+ .toArray();
+ assert.eq(queryStatsResults.length, 4, queryStatsResults);
+
+ verifyMetrics(queryStatsResults);
+
+ // This filters to just the queryStats for query coll.find().sort({"foo": 1}).batchSize(2).
+ queryStatsResults =
+ testDB.getSiblingDB("admin")
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.sort.foo": 1}}])
+ .toArray();
+ assert.eq(queryStatsResults.length, 1, queryStatsResults);
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell");
+ assert.eq(queryStatsResults[0].metrics.execCount, 1);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.sum, numDocs);
+
+ // This filters to just the queryStats for query coll.find({foo: {$eq:
+ // 1}}).limit(query2Limit).batchSize(2).
+ queryStatsResults =
+ testDB.getSiblingDB("admin")
+ .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.limit": '?number'}}])
+ .toArray();
+ assert.eq(queryStatsResults.length, 1, queryStatsResults);
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell");
+ assert.eq(queryStatsResults[0].metrics.execCount, 1);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.sum, query2Limit);
+
+ // This filters to just the queryStats for query coll.find({foo: {$eq: 0}}).batchSize(2).
+ queryStatsResults = testDB.getSiblingDB("admin")
+ .aggregate([
+ {$queryStats: {}},
+ {
+ $match: {
+ "key.queryShape.filter.foo": {$eq: {$eq: "?number"}},
+ "key.queryShape.limit": {$exists: false},
+ "key.queryShape.sort": {$exists: false}
+ }
+ }
+ ])
+ .toArray();
+ assert.eq(queryStatsResults.length, 1, queryStatsResults);
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test");
+ assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName());
+ assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell");
+ assert.eq(queryStatsResults[0].metrics.execCount, 2);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.sum, numDocs / 2 + 2 * fooEqBatchSize);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.max, numDocs / 2);
+ assert.eq(queryStatsResults[0].metrics.docsReturned.min, 2 * fooEqBatchSize);
+}
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_regex.js b/jstests/noPassthrough/queryStats/query_stats_regex.js
new file mode 100644
index 0000000000000..b910df94f4edb
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_regex.js
@@ -0,0 +1,36 @@
+/**
+ * Test that telemetry works properly for a find command that uses regex.
+ * @tags: [featureFlagQueryStats]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/query_stats_utils.js"); // For getQueryStats.
+
+// Turn on the collection of telemetry metrics.
+let options = {
+ setParameter: {internalQueryStatsRateLimit: -1},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 100;
+for (let i = 0; i < numDocs / 2; ++i) {
+ bulk.insert({foo: "ABCDE"});
+ bulk.insert({foo: "CDEFG"});
+}
+assert.commandWorked(bulk.execute());
+
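+// The regex pattern should be recorded in the query shape as the normalized placeholder "?string"
+// rather than the literal pattern.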
+{
+ coll.find({foo: {$regex: "/^ABC/i"}}).itcount();
+ let queryStats = getQueryStats(testDB);
+ assert.eq(1, queryStats.length, queryStats);
+ assert.eq({"foo": {"$regex": "?string"}}, queryStats[0].key.queryShape.filter);
+}
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js
new file mode 100644
index 0000000000000..481dc2ba49e04
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js
@@ -0,0 +1,32 @@
+/**
+ * Test that reads from the telemetry store return no entries when the sampling rate is not
+ * greater than 0, even if the feature flag is on.
+ * @tags: [featureFlagQueryStats]
+ */
+let options = {
+ setParameter: {internalQueryStatsRateLimit: 0},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testdb = conn.getDB('test');
+var coll = testdb[jsTestName()];
+coll.drop();
+for (var i = 0; i < 20; i++) {
+ coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+}
+
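+// Run a query while the rate limit is 0; it should not be recorded, so the $queryStats read below
+// should return no entries.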
+coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
+
+// Reading telemetry store with a sampling rate of 0 should return 0 documents.
+let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}});
+assert.eq(telStore.cursor.firstBatch.length, 0);
+
+// Reading the telemetry store should now work with a sampling rate greater than 0.
+assert.commandWorked(
+ testdb.adminCommand({setParameter: 1, internalQueryStatsRateLimit: 2147483647}));
+coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
+telStore = assert.commandWorked(
+ testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}));
+assert.eq(telStore.cursor.firstBatch.length, 1);
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js
new file mode 100644
index 0000000000000..0d9720a6040d5
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js
@@ -0,0 +1,214 @@
+/**
+ * Test the telemetry related serverStatus metrics.
+ * @tags: [featureFlagQueryStats]
+ */
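+// Start a fresh mongod with the given options, run the given test callback against it, then shut
+// the node down.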
+function runTestWithMongodOptions(mongodOptions, test, testOptions) {
+ const conn = MongoRunner.runMongod(mongodOptions);
+ const testDB = conn.getDB('test');
+ const coll = testDB[jsTestName()];
+
+ test(conn, testDB, coll, testOptions);
+
+ MongoRunner.stopMongod(conn);
+}
+
+// Helper to round up to the next highest power of 2 for our estimation.
+function align(number) {
+ return Math.pow(2, Math.ceil(Math.log2(number)));
+}
+
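+// Run enough distinct query shapes to add roughly 2MB of data to the query stats store.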
+function addApprox2MBOfStatsData(testDB, coll) {
+ const k2MB = 2 * 1024 * 1024;
+
+ const cmdObjTemplate = {
+ find: coll.getName(),
+ filter: {foo123: {$eq: "?"}},
+ };
+
+ const kEstimatedEntrySizeBytes = (() => {
+ // Metrics stored per shape.
+ const kNumCountersAndDates =
+ 4 /* top-level */ + (4 * 3) /* those with sum, min, max, sumOfSquares */;
+
+ // Just a sample; it will change based on where the test is run, but it shouldn't be off by
+ // too much.
+ const kClientMetadataEst = {
+ client: {application: {name: "MongoDB Shell"}},
+ driver: {name: "MongoDB Internal Client", version: "7.1.0-alpha"},
+ os: {type: "Linux", name: "Ubuntu", architecture: "aarch64", version: "22.04"}
+ };
+
+ const kCmdNsObj = {cmdNs: {db: testDB.getName(), coll: coll.getName()}};
+
+ // This is likely not to be exact - we are probably forgetting something. But we don't need
+ // to be exact, just "good enough."
+ return align(kNumCountersAndDates * 4 + Object.bsonsize(cmdObjTemplate) +
+ Object.bsonsize(kClientMetadataEst) + Object.bsonsize(kCmdNsObj));
+ })();
+ const nIterations = k2MB / kEstimatedEntrySizeBytes;
+ for (let i = 0; i <= nIterations; i++) {
+ let newQuery = {["foo" + i]: "bar"};
+ const cmdObj = cmdObjTemplate;
+ cmdObj.filter = newQuery;
+ const cmdRes = assert.commandWorked(testDB.runCommand(cmdObj));
+ new DBCommandCursor(testDB, cmdRes).itcount();
+ }
+}
+/**
+ * Test serverStatus metric which counts the number of evicted entries.
+ *
+ * testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true }
+ */
+function evictionTest(conn, testDB, coll, testOptions) {
+ const evictedBefore = testDB.serverStatus().metrics.queryStats.numEvicted;
+ assert.eq(evictedBefore, 0);
+ addApprox2MBOfStatsData(testDB, coll);
+ if (!testOptions.resetCacheSize) {
+ const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
+ assert.gt(evictedAfter, 0);
+ return;
+ }
+ // Make sure number of evicted entries increases when the cache size is reset, which forces out
+ // least recently used entries to meet the new, smaller size requirement.
+ assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
+ assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "1MB"}));
+ const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted;
+ assert.gt(evictedAfter, 0);
+}
+
+/**
+ * Test serverStatus metric which counts the number of requests for which telemetry is not collected
+ * due to rate-limiting.
+ *
+ * testOptions must include `samplingRate` and `numRequests` number fields;
+ * e.g., { samplingRate: -1, numRequests: 20 }
+ */
+function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
+ const numRateLimitedRequestsBefore =
+ testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
+ assert.eq(numRateLimitedRequestsBefore, 0);
+
+ coll.insert({a: 0});
+
+ // Running numRequests / 2 times since we dispatch two requests per iteration
+ for (var i = 0; i < testOptions.numRequests / 2; i++) {
+ coll.find({a: 0}).toArray();
+ coll.aggregate([{$match: {a: 1}}]);
+ }
+
+ const numRateLimitedRequestsAfter =
+ testDB.serverStatus().metrics.queryStats.numRateLimitedRequests;
+
+ if (testOptions.samplingRate === 0) {
+ // Telemetry should not be collected for any requests.
+ assert.eq(numRateLimitedRequestsAfter, testOptions.numRequests);
+ } else if (testOptions.samplingRate >= testOptions.numRequests) {
+ // Telemetry should be collected for all requests.
+ assert.eq(numRateLimitedRequestsAfter, 0);
+ } else {
+ // Telemetry should be collected for some but not all requests.
+ assert.gt(numRateLimitedRequestsAfter, 0);
+ assert.lt(numRateLimitedRequestsAfter, testOptions.numRequests);
+ }
+}
+
+function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
+ assert.eq(testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes, 0);
+ let halfWayPointSize;
+ // Using only three-digit numbers (e.g., 100, 101) keeps the string length, and therefore the
+ // key size, the same for all entries, which makes predicting the total size of the store clean
+ // and easy.
+ for (var i = 100; i < 200; i++) {
+ coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount();
+ if (i == 150) {
+ halfWayPointSize =
+ testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
+ }
+ }
+ // Confirm that telemetry store has grown and size is non-zero.
+ assert.gt(halfWayPointSize, 0);
+ const fullSize = testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes;
+ assert.gt(fullSize, 0);
+ // Make sure the final telemetry store size is twice the halfway-point size (+/- 5%).
+ assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05,
+ tojson({fullSize, halfWayPointSize}));
+}
+
+function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) {
+ const debugBuild = testDB.adminCommand('buildInfo').debug;
+ if (debugBuild) {
+ jsTestLog("Skipping telemetry store write errors test because debug build will tassert.");
+ return;
+ }
+
+ const errorsBefore = testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors;
+ assert.eq(errorsBefore, 0);
+ for (let i = 0; i < 5; i++) {
+ // Command should succeed and record the error.
+ let query = {};
+ query["foo" + i] = "bar";
+ coll.aggregate([{$match: query}]).itcount();
+ }
+
+ // Make sure that we recorded a write error for each run.
+ assert.eq(testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors, 5);
+}
+
+/**
+ * In this configuration, we insert enough entries into the telemetry store to trigger LRU
+ * eviction.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsCacheSize: "1MB", internalQueryStatsRateLimit: -1},
+},
+ evictionTest,
+ {resetCacheSize: false});
+/**
+ * In this configuration, eviction is triggered only when the telemetry store size is reset.
+ *
+ * Use an 8MB upper limit since our estimate of the query stats entry size is rough; the extra
+ * wiggle room means we don't have to keep adjusting this test as we tweak the entry.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsCacheSize: "8MB", internalQueryStatsRateLimit: -1},
+},
+ evictionTest,
+ {resetCacheSize: true});
+
+/**
+ * In this configuration, every query is sampled, so no requests should be rate-limited.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsRateLimit: -1},
+},
+ countRateLimitedRequestsTest,
+ {samplingRate: 2147483647, numRequests: 20});
+
+/**
+ * In this configuration, the sampling rate is set so that some but not all requests are
+ * rate-limited.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsRateLimit: 10},
+},
+ countRateLimitedRequestsTest,
+ {samplingRate: 10, numRequests: 20});
+
+/**
+ * Sample all queries and assert that the size of the telemetry store equals the number of entries
+ * times the entry size.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsRateLimit: -1},
+},
+ telemetryStoreSizeEstimateTest);
+
+/**
+ * Use a very small telemetry store size and assert that errors in writing to the telemetry store
+ * are tracked.
+ */
+runTestWithMongodOptions({
+ setParameter: {internalQueryStatsCacheSize: "0.00001MB", internalQueryStatsRateLimit: -1},
+},
+ telemetryStoreWriteErrorsTest);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/query_stats_upgrade.js b/jstests/noPassthrough/queryStats/query_stats_upgrade.js
new file mode 100644
index 0000000000000..1e48c768bf6de
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/query_stats_upgrade.js
@@ -0,0 +1,38 @@
+/**
+ * Test that telemetry doesn't work on a lower FCV version but works after an FCV upgrade.
+ * @tags: [featureFlagQueryStats]
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+const dbpath = MongoRunner.dataPath + jsTestName();
+let conn = MongoRunner.runMongod({dbpath: dbpath});
+let testDB = conn.getDB(jsTestName());
+// This test should only be run with the flag enabled.
+assert(FeatureFlagUtil.isEnabled(testDB, "QueryStats"));
+
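+// Downgrade the FCV (optionally restarting the node afterwards) and verify that $queryStats fails
+// until the FCV is upgraded back to 'latest'.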
+function testLower(restart = false) {
+ let adminDB = conn.getDB("admin");
+ assert.commandWorked(adminDB.runCommand(
+ {setFeatureCompatibilityVersion: binVersionToFCV("last-lts"), confirm: true}));
+ if (restart) {
+ MongoRunner.stopMongod(conn);
+ conn = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true});
+ testDB = conn.getDB(jsTestName());
+ adminDB = conn.getDB("admin");
+ }
+
+ assert.commandFailedWithCode(
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), 6579000);
+
+ // Upgrade FCV.
+ assert.commandWorked(adminDB.runCommand(
+ {setFeatureCompatibilityVersion: binVersionToFCV("latest"), confirm: true}));
+
+ // We should be able to run a telemetry pipeline now that the FCV is correct.
+ assert.commandWorked(
+ testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+ );
+}
+testLower(true);
+testLower(false);
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
similarity index 91%
rename from jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js
rename to jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
index 25cac47555efb..fb16331a3b5e6 100644
--- a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js
+++ b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js
@@ -1,15 +1,10 @@
/**
* Test that telemetry key generation works for queries with non-object fields.
- * @tags: [featureFlagTelemetry]
+ * @tags: [featureFlagQueryStats]
*/
-load('jstests/libs/analyze_plan.js');
-
-(function() {
-"use strict";
-
// Turn on the collecting of telemetry metrics.
let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
+ setParameter: {internalQueryStatsRateLimit: -1},
};
const conn = MongoRunner.runMongod(options);
@@ -72,5 +67,4 @@ confirmAggSuccess(
}
}]);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js b/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js
new file mode 100644
index 0000000000000..98c86f65cb155
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js
@@ -0,0 +1,89 @@
+/**
+ * This test confirms that queryStats store key fields specific to replica sets (readConcern and
+ * readPreference) are included and correctly shapified. General command fields related to api
+ * versioning are included for good measure.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");
+(function() {
+"use strict";
+
+const replTest = new ReplSetTest({name: 'reindexTest', nodes: 2});
+
+// Turn on the collection of telemetry metrics.
+replTest.startSet({setParameter: {internalQueryStatsRateLimit: -1}});
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+const dbName = jsTestName();
+const collName = "foobar";
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
+const secondaryDB = secondary.getDB(dbName);
+const secondaryColl = secondaryDB.getCollection(collName);
+
+primaryColl.drop();
+
+assert.commandWorked(primaryColl.insert({a: 1000}));
+
+replTest.awaitReplication();
+
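+// Assert that, aside from the queryShape, client, and command fields, every field in the query
+// stats key also appears in the command object and that the two have the same number of fields.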
+function confirmCommandFieldsPresent(queryStatsKey, commandObj) {
+ for (const field in queryStatsKey) {
+ if (field == "queryShape" || field == "client" || field == "command") {
+ continue;
+ }
+ assert(commandObj.hasOwnProperty(field),
+ `${field} is present in the query stats key but not present in command obj: ${
+ tojson(queryStatsKey)}, ${tojson(commandObj)}`);
+ }
+ assert.eq(Object.keys(queryStatsKey).length, Object.keys(commandObj).length, queryStatsKey);
+}
+
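+// Run a find with replica-set-specific options (readConcern, $readPreference) and API versioning
+// fields, then confirm each appears in the recorded query stats key.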
+let commandObj = {
+ find: collName,
+ filter: {v: {$eq: 2}},
+ readConcern: {level: "local", afterClusterTime: new Timestamp(0, 1)},
+ $readPreference: {mode: "primary"},
+ apiDeprecationErrors: false,
+ apiVersion: "1",
+ apiStrict: false,
+};
+const replSetConn = new Mongo(replTest.getURL());
+assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj));
+let telemetry = getQueryStats(replSetConn, {collName: collName});
+delete telemetry[0].key["collectionType"];
+confirmCommandFieldsPresent(telemetry[0].key, commandObj);
+// Check that readConcern afterClusterTime is normalized.
+assert.eq(telemetry[0].key.readConcern.afterClusterTime, "?timestamp");
+
+// Check that readPreference is not populated and that readConcern only has an afterClusterTime field.
+commandObj["readConcern"] = {
+ afterClusterTime: new Timestamp(1, 0)
+};
+delete commandObj["$readPreference"];
+assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj));
+telemetry = getQueryStats(replSetConn, {collName});
+// We're not concerned with this field here.
+delete telemetry[0].key["collectionType"];
+confirmCommandFieldsPresent(telemetry[0].key, commandObj);
+assert.eq(telemetry[0].key["readConcern"], {"afterClusterTime": "?timestamp"});
+
+// Check that readConcern has no afterClusterTime and that fields related to API usage are not present.
+commandObj["readConcern"] = {
+ level: "local"
+};
+delete commandObj["apiDeprecationErrors"];
+delete commandObj["apiVersion"];
+delete commandObj["apiStrict"];
+assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj));
+telemetry = getQueryStats(replSetConn, {collName: collName});
+assert.eq(telemetry[1].key["readConcern"], {level: "local"});
+// We're not concerned with this field here.
+delete telemetry[1].key["collectionType"];
+confirmCommandFieldsPresent(telemetry[1].key, commandObj);
+
+replTest.stopSet();
+})();
diff --git a/jstests/noPassthrough/query_engine_stats.js b/jstests/noPassthrough/query_engine_stats.js
index 15965be071a87..8ec6cb3183104 100644
--- a/jstests/noPassthrough/query_engine_stats.js
+++ b/jstests/noPassthrough/query_engine_stats.js
@@ -3,11 +3,8 @@
* serverStatus.
*/
-(function() {
-"use strict";
-
load("jstests/libs/profiler.js"); // For 'getLatestProfilerEntry()'.
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
let conn = MongoRunner.runMongod({});
assert.neq(null, conn, "mongod was unable to start up");
@@ -18,7 +15,7 @@ let db = conn.getDB(jsTestName());
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
function initializeTestCollection() {
@@ -229,16 +226,15 @@ verifyProfiler(queryComment, framework.find.sbe);
MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({
- restart: conn,
- setParameter:
- {featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: "tryBonsai"}
-});
+conn =
+ MongoRunner.runMongod({restart: conn, setParameter: {featureFlagCommonQueryFramework: true}});
assert.neq(null, conn, "mongod was unable to start up");
db = conn.getDB(jsTestName());
coll = initializeTestCollection();
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
// Run find using CQF
expectedCounters = generateExpectedCounters(framework.find.cqf);
@@ -281,5 +277,4 @@ verifyProfiler(queryComment, "cqf");
cursor.next(); // getMore performed
verifyProfiler(queryComment, "cqf");
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index ac1fc5eefaf48..76a947612ff78 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -1,14 +1,11 @@
/**
* Tests to validate the input values accepted by internal query server parameters. The test
- * verfies that the system responds with the expected error code for input values that fall outside
+ * verifies that the system responds with the expected error code for input values that fall outside
* each parameter's valid bounds, and correctly applies input values which fall within that
* parameter's valid bounds.
*/
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled
-
-(function() {
-"use strict";
+import {checkCascadesFeatureFlagEnabled} from "jstests/libs/optimizer_utils.js";
const conn = MongoRunner.runMongod();
const testDB = conn.getDB("admin");
@@ -65,6 +62,11 @@ const expectedParamDefaults = {
internalQueryColumnScanMinAvgDocSizeBytes: 1024,
internalQueryColumnScanMinCollectionSizeBytes: -1,
internalQueryColumnScanMinNumColumnFilters: 3,
+ internalQueryMaxSpoolMemoryUsageBytes: 100 * 1024 * 1024,
+ internalQueryMaxSpoolDiskUsageBytes: 10 * 100 * 1024 * 1024,
+ deprioritizeUnboundedUserCollectionScans: true,
+ deprioritizeUnboundedUserIndexScans: true,
+ internalQueryDocumentSourceWriterBatchExtraReservedBytes: 0,
};
function assertDefaultParameterValues() {
@@ -259,14 +261,15 @@ assertSetParameterFails("internalQueryFLERewriteMemoryLimit", 0);
// Need to have the CQF feature flag enabled in order to set tryBonsai or forceBonsai.
assertSetParameterSucceeds("internalQueryFrameworkControl", "forceClassicEngine");
assertSetParameterSucceeds("internalQueryFrameworkControl", "trySbeEngine");
-if (checkCascadesOptimizerEnabled(testDB)) {
+if (checkCascadesFeatureFlagEnabled(testDB)) {
assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsai");
+ assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsaiExperimental");
assertSetParameterSucceeds("internalQueryFrameworkControl", "forceBonsai");
} else {
assert.commandFailed(
testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
- assert.commandFailed(
- testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"}));
+ assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsaiExperimental");
+ assertSetParameterSucceeds("internalQueryFrameworkControl", "forceBonsai");
}
assertSetParameterFails("internalQueryFrameworkControl", "tryCascades");
assertSetParameterFails("internalQueryFrameworkControl", 1);
@@ -283,5 +286,24 @@ assertSetParameterSucceeds("internalQueryColumnScanMinNumColumnFilters", 100);
assertSetParameterSucceeds("internalQueryColumnScanMinNumColumnFilters", 0);
assertSetParameterFails("internalQueryColumnScanMinNumColumnFilters", -1);
+assertSetParameterSucceeds("internalQueryMaxSpoolMemoryUsageBytes", 100);
+assertSetParameterSucceeds("internalQueryMaxSpoolMemoryUsageBytes", 1);
+assertSetParameterFails("internalQueryMaxSpoolMemoryUsageBytes", 0);
+
+assertSetParameterSucceeds("internalQueryMaxSpoolDiskUsageBytes", 100);
+assertSetParameterSucceeds("internalQueryMaxSpoolDiskUsageBytes", 1);
+assertSetParameterFails("internalQueryMaxSpoolDiskUsageBytes", 0);
+
+assertSetParameterSucceeds("deprioritizeUnboundedUserCollectionScans", true);
+assertSetParameterSucceeds("deprioritizeUnboundedUserCollectionScans", false);
+assertSetParameterSucceeds("deprioritizeUnboundedUserIndexScans", true);
+assertSetParameterSucceeds("deprioritizeUnboundedUserIndexScans", false);
+
+assertSetParameterSucceeds("internalQueryDocumentSourceWriterBatchExtraReservedBytes", 10);
+assertSetParameterSucceeds("internalQueryDocumentSourceWriterBatchExtraReservedBytes",
+ 4 * 1024 * 1024);
+assertSetParameterFails("internalQueryDocumentSourceWriterBatchExtraReservedBytes", -1);
+assertSetParameterFails("internalQueryDocumentSourceWriterBatchExtraReservedBytes",
+ 9 * 1024 * 1024);
+
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/query_oplogreplay.js b/jstests/noPassthrough/query_oplogreplay.js
index 4fba7c108b74e..9300574e8fbc2 100644
--- a/jstests/noPassthrough/query_oplogreplay.js
+++ b/jstests/noPassthrough/query_oplogreplay.js
@@ -1,10 +1,8 @@
// Test oplog queries that can be optimized with oplogReplay.
// @tags: [requires_replication, requires_capped]
-(function() {
-"use strict";
+import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js";
-load("jstests/libs/analyze_plan.js");
load("jstests/libs/storage_engine_utils.js");
let replSet = new ReplSetTest({nodes: 1});
@@ -37,24 +35,26 @@ for (let i = 1; i <= 100; i++) {
assert.commandWorked(res);
}
+const collNs = `test.${jsTestName()}`;
+
// A $gt query on just the 'ts' field should return the next document after the timestamp.
-var cursor = oplog.find({ts: {$gt: timestamps[20]}});
+var cursor = oplog.find({ns: collNs, ts: {$gt: timestamps[20]}});
assert.eq(21, cursor.next().o["_id"]);
assert.eq(22, cursor.next().o["_id"]);
// A $gte query on the 'ts' field should include the timestamp.
-cursor = oplog.find({ts: {$gte: timestamps[20]}});
+cursor = oplog.find({ns: collNs, ts: {$gte: timestamps[20]}});
assert.eq(20, cursor.next().o["_id"]);
assert.eq(21, cursor.next().o["_id"]);
// An $eq query on the 'ts' field should return the single record with the timestamp.
-cursor = oplog.find({ts: {$eq: timestamps[20]}});
+cursor = oplog.find({ns: collNs, ts: {$eq: timestamps[20]}});
assert.eq(20, cursor.next().o["_id"]);
assert(!cursor.hasNext());
// An AND with both a $gt and $lt query on the 'ts' field will correctly return results in
// the proper bounds.
-cursor = oplog.find({$and: [{ts: {$lt: timestamps[5]}}, {ts: {$gt: timestamps[1]}}]});
+cursor = oplog.find({$and: [{ns: collNs}, {ts: {$lt: timestamps[5]}}, {ts: {$gt: timestamps[1]}}]});
assert.eq(2, cursor.next().o["_id"]);
assert.eq(3, cursor.next().o["_id"]);
assert.eq(4, cursor.next().o["_id"]);
@@ -64,6 +64,7 @@ assert(!cursor.hasNext());
// tightest range.
cursor = oplog.find({
$and: [
+ {ns: collNs},
{ts: {$gte: timestamps[2]}},
{ts: {$gt: timestamps[3]}},
{ts: {$lte: timestamps[7]}},
@@ -79,6 +80,7 @@ assert(!cursor.hasNext());
// result.
cursor = oplog.find({
$and: [
+ {ns: collNs},
{ts: {$gte: timestamps[1]}},
{ts: {$gt: timestamps[2]}},
{ts: {$eq: timestamps[5]}},
@@ -90,46 +92,49 @@ assert.eq(5, cursor.next().o["_id"]);
assert(!cursor.hasNext());
// An $eq query stops scanning after passing the max timestamp.
-let res = oplog.find({ts: {$eq: timestamps[10]}}).explain("executionStats");
+let res = oplog.find({ns: collNs, ts: {$eq: timestamps[10]}}).explain("executionStats");
assert.commandWorked(res);
// We expect to be able to seek directly to the entry with a 'ts' of 10.
-assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res));
+assert.lte(res.executionStats.totalDocsExamined, 2, res);
let collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), res);
// An AND with an $lt predicate stops scanning after passing the max timestamp.
-res = oplog.find({$and: [{ts: {$gte: timestamps[1]}}, {ts: {$lt: timestamps[10]}}]})
+res = oplog.find({$and: [{ts: {$gte: timestamps[51]}}, {ts: {$lt: timestamps[60]}}]})
.explain("executionStats");
assert.commandWorked(res);
-assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res));
+assert.lte(res.executionStats.totalDocsExamined, res.executionStats.nReturned + 2, res);
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[60], longToTs(collScanStage.maxRecord), res);
+assert.eq(timestamps[51], longToTs(collScanStage.minRecord), res);
// An AND with an $lte predicate stops scanning after passing the max timestamp.
-res = oplog.find({$and: [{ts: {$gte: timestamps[1]}}, {ts: {$lte: timestamps[10]}}]})
+res = oplog.find({$and: [{ts: {$gte: timestamps[51]}}, {ts: {$lte: timestamps[60]}}]})
.explain("executionStats");
assert.commandWorked(res);
-assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res));
+assert.lte(res.executionStats.totalDocsExamined, res.executionStats.nReturned + 2, res);
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[60], longToTs(collScanStage.maxRecord), res);
+assert.eq(timestamps[51], longToTs(collScanStage.minRecord), res);
// The max timestamp is respected even when the min timestamp is smaller than the lowest
// timestamp in the collection.
-res = oplog.find({$and: [{ts: {$gte: timestamps[0]}}, {ts: {$lte: timestamps[10]}}]})
+res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[0]}}, {ts: {$lte: timestamps[10]}}]})
.explain("executionStats");
assert.commandWorked(res);
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), res);
// An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max
// timestamp.
res = oplog
.find({
$and: [
+ {ns: collNs},
{ts: {$gte: timestamps[0]}},
{ts: {$lte: timestamps[10]}},
{ts: {$eq: timestamps[5]}},
@@ -141,37 +146,37 @@ assert.commandWorked(res);
// We expect to be able to seek directly to the entry with a 'ts' of 5.
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[5], longToTs(collScanStage.maxRecord), tojson(res));
-assert.eq(timestamps[5], longToTs(collScanStage.minRecord), tojson(res));
+assert.eq(timestamps[5], longToTs(collScanStage.maxRecord), res);
+assert.eq(timestamps[5], longToTs(collScanStage.minRecord), res);
// An $eq query for a non-existent timestamp scans a single oplog document.
-res = oplog.find({ts: {$eq: makeTS(200)}}).explain("executionStats");
+res = oplog.find({ns: collNs, ts: {$eq: makeTS(200)}}).explain("executionStats");
assert.commandWorked(res);
// We expect to be able to seek directly to the end of the oplog.
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(makeTS(200), longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(makeTS(200), longToTs(collScanStage.maxRecord), res);
// When the filter matches the last document within the timestamp range, the collection scan
// examines at most one more document.
-res = oplog.find({$and: [{ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]})
+res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]})
.explain("executionStats");
assert.commandWorked(res);
// We expect to be able to seek directly to the start of the 'ts' range.
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[8], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[8], longToTs(collScanStage.maxRecord), res);
// A filter with only an upper bound predicate on 'ts' stops scanning after
// passing the max timestamp.
-res = oplog.find({ts: {$lt: timestamps[4]}}).explain("executionStats");
+res = oplog.find({ns: collNs, ts: {$lt: timestamps[4]}}).explain("executionStats");
assert.commandWorked(res);
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-assert.eq(timestamps[4], longToTs(collScanStage.maxRecord), tojson(res));
+assert.eq(timestamps[4], longToTs(collScanStage.maxRecord), res);
// Oplog replay optimization should work with projection.
-res = oplog.find({ts: {$lte: timestamps[4]}}).projection({op: 0});
+res = oplog.find({ns: collNs, ts: {$lte: timestamps[4]}}).projection({op: 0});
while (res.hasNext()) {
const next = res.next();
assert(!next.hasOwnProperty('op'));
@@ -180,7 +185,7 @@ while (res.hasNext()) {
res = res.explain("executionStats");
assert.commandWorked(res);
-res = oplog.find({ts: {$gte: timestamps[90]}}).projection({'op': 0});
+res = oplog.find({ns: collNs, ts: {$gte: timestamps[90]}}).projection({'op': 0});
while (res.hasNext()) {
const next = res.next();
assert(!next.hasOwnProperty('op'));
@@ -190,7 +195,7 @@ res = res.explain("executionStats");
assert.commandWorked(res);
// Oplog replay optimization should work with limit.
-res = oplog.find({$and: [{ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]})
+res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]})
.limit(2)
.explain("executionStats");
assert.commandWorked(res);
@@ -200,7 +205,7 @@ assert.eq(2, collScanStage.nReturned, res);
// A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding
// the oplog start (SERVER-13566).
-cursor = oplog.find({ts: {$gte: timestamps[20]}, "o._id": 25});
+cursor = oplog.find({ns: collNs, ts: {$gte: timestamps[20]}, "o._id": 25});
assert.eq(25, cursor.next().o["_id"]);
assert(!cursor.hasNext());
@@ -221,11 +226,12 @@ assert.commandWorked(res);
assert.eq(res.executionStats.totalDocsExamined, 100);
// Ensure oplog replay hack does not work for backward scans.
-res = oplog.find({ts: {$lt: timestamps[4]}}).sort({$natural: -1}).explain("executionStats");
+res = oplog.find({ns: collNs, ts: {$lt: timestamps[4]}})
+ .sort({$natural: -1})
+ .explain("executionStats");
assert.commandWorked(res);
-assert.gte(res.executionStats.totalDocsExamined, 100, tojson(res));
+assert.gte(res.executionStats.totalDocsExamined, 100, res);
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN");
assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res));
-replSet.stopSet();
-}());
+replSet.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/query_yields_catch_index_corruption.js b/jstests/noPassthrough/query_yields_catch_index_corruption.js
index 1360641154116..b27b1e4bdb58c 100644
--- a/jstests/noPassthrough/query_yields_catch_index_corruption.js
+++ b/jstests/noPassthrough/query_yields_catch_index_corruption.js
@@ -1,7 +1,7 @@
// @tags: [
// requires_persistence,
-// # TODO: SERVER-64007 Plans produced by Cascades don't yield
-// cqf_incompatible,
+// # TODO: SERVER-70446 Enable yielding for index plans in CQF.
+// cqf_experimental_incompatible,
// ]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/quiet_shell.js b/jstests/noPassthrough/quiet_shell.js
index 1f55d0e06f9ea..2ab7880587b93 100644
--- a/jstests/noPassthrough/quiet_shell.js
+++ b/jstests/noPassthrough/quiet_shell.js
@@ -23,4 +23,4 @@ clearRawMongoProgramOutput();
}
MongoRunner.stopMongod(mongo);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index 37c644b8d353b..8b1d4c2061e9f 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -4,6 +4,8 @@
// requires_persistence,
// uses_atclustertime,
// uses_transactions,
+// # Tests running with experimental CQF behavior require test commands to be enabled.
+// cqf_experimental_incompatible,
// ]
function _getClusterTime(rst) {
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
index 31b2e513b3154..ee9a1d9432c95 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
@@ -1,6 +1,7 @@
// Verifies that snapshot readConcern on mongos is not gated by the enableTestCommands flag.
//
-// @tags: [requires_sharding]
+// Tests running with experimental CQF behavior require test commands to be enabled.
+// @tags: [requires_sharding, cqf_experimental_incompatible]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
index 43d1bd4a8140a..94c79ec95b2af 100644
--- a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
+++ b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
@@ -1,11 +1,7 @@
// Tests that snapshot reads return an error when accessing a collection whose metadata is invalid
// for the snapshot's point in time.
// @tags: [uses_transactions]
-(function() {
-"use strict";
-
-load("jstests/libs/curop_helpers.js"); // For waitForCurOpByFailPoint().
-load("jstests/libs/feature_flag_util.js"); // For FeatureFlagUtil.isEnabled().
+load("jstests/libs/curop_helpers.js"); // For waitForCurOpByFailPoint().
const kDbName = "test";
const kCollName = "coll";
@@ -52,7 +48,6 @@ function testCommand(cmd, curOpFilter, expectSucceed) {
 // Execute command in parallel shell. Read commands should work even if catalog changes have
 // occurred since opening the snapshot.
- expectSucceed = expectSucceed && FeatureFlagUtil.isEnabled(testDB, "PointInTimeCatalogLookups");
const awaitCommand = execCommand(cmd, expectSucceed);
waitForCurOpByFailPointNoNS(testDB, "hangAfterPreallocateSnapshot", curOpFilter);
@@ -113,5 +108,4 @@ testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]},
{"command.update": kCollName, "command.readConcern.level": "snapshot"},
false /*write is expected to fail*/);
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index 7b1e3867d5107..b9b8f9d69c92c 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -16,11 +16,7 @@
* ]
*/
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/feature_flag_util.js");
-
-(function() {
-"use strict";
+import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js";
// Tests the functionality for committed reads for the given read concern level.
function testReadConcernLevel(level) {
@@ -51,7 +47,6 @@ function testReadConcernLevel(level) {
// Point-in-time reads on a collection before it was created behaves like reading from a
// non-existent collection.
- assert(FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups"));
assert.commandWorked(res);
assert(res.cursor.firstBatch.length == 0);
}
@@ -71,7 +66,6 @@ function testReadConcernLevel(level) {
// Point-in-time reads on a collection before it was created behaves like reading from a
// non-existent collection.
- assert(FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups"));
assert.commandWorked(res);
assert(res.cursor.firstBatch.length == 0);
}
@@ -217,42 +211,18 @@ function testReadConcernLevel(level) {
assert.eq(cursor.next().version, 4);
assert(!cursor.objsLeftInBatch());
- // Even though renaming advances the minimum visible snapshot, we're querying by a namespace
- // that no longer exists. Because of this, the query surprisingly returns no results instead of
- // timing out. This violates read-committed semantics but is allowed by the current
- // specification. This is not the case for point-in-time reads as the collection instance is
- // recreated internally to support reads at this time.
+ // Even though the collection is renamed, point-in-time reads reconstruct the prior collection
+ // internally.
const tempNs = db.getName() + '.temp';
assert.commandWorked(db.adminCommand({renameCollection: t.getFullName(), to: tempNs}));
- if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
-
- // Snapshot is available.
- assertSnapshotAvailableForReadConcernLevel();
- assertSnapshotAvailableForReadConcernLevelByUUID(collUuid);
- } else {
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
-
- // Trigger a getMore that should fail due to the rename.
- let error = assert.throws(() => {
- cursor.next();
- });
- assert.eq(error.code, ErrorCodes.QueryPlanKilled);
-
- // Starting a new query by UUID will block because the minimum visible timestamp is ahead of
- // the majority-committed snapshot.
- assertNoSnapshotAvailableForReadConcernLevelByUUID(collUuid);
- }
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+
+ // Snapshot is available.
+ assertSnapshotAvailableForReadConcernLevel();
+ assertSnapshotAvailableForReadConcernLevelByUUID(collUuid);
- // Renaming back will cause queries to block again because the original namespace exists, and
- // its minimum visible timestamp is ahead of the current majority-committed snapshot when not
- // using point-in-time reads.
assert.commandWorked(db.adminCommand({renameCollection: tempNs, to: t.getFullName()}));
- if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assertSnapshotAvailableForReadConcernLevel();
- } else {
- assertNoSnapshotAvailableForReadConcernLevel();
- }
+ assertSnapshotAvailableForReadConcernLevel();
newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
@@ -264,25 +234,11 @@ function testReadConcernLevel(level) {
// violates strict read-committed semantics since we don't guarantee them on metadata
// operations.
t.drop();
- if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
- }
- // Creating a new collection with the same name hides the collection until that operation is
- // in the committed view when not using point-in-time reads.
t.insert({_id: 0, version: 8});
- if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assertSnapshotAvailableForReadConcernLevel();
- } else {
- assertNoSnapshotAvailableForReadConcernLevel();
- }
+ assertSnapshotAvailableForReadConcernLevel();
newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assertSnapshotAvailableForReadConcernLevel();
- } else {
- assertNoSnapshotAvailableForReadConcernLevel();
- }
+ assertSnapshotAvailableForReadConcernLevel();
assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
assert.eq(getCursorForReadConcernLevel().itcount(), 1);
assert.eq(getAggCursorForReadConcernLevel().itcount(), 1);
@@ -312,4 +268,3 @@ MongoRunner.stopMongod(conn);
if (supportsCommittedReads) {
testReadConcernLevel("majority");
}
-}());
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index b5f9328b8ac45..b5b4ddf3603ae 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -19,8 +19,9 @@
(function() {
'use strict';
-// Skip metadata consistency check since the sharded clsuter is started with 0 shards
+// Skip metadata consistency checks since the sharded cluster is started with 0 shards
TestData.skipCheckMetadataConsistency = true;
+TestData.skipCheckRoutingTableConsistency = true;
var testServer = MongoRunner.runMongod();
var db = testServer.getDB("test");
diff --git a/jstests/noPassthrough/read_only_allow_disk_use.js b/jstests/noPassthrough/read_only_allow_disk_use.js
index a6fc5ea4013ef..f7fb3c62da37e 100644
--- a/jstests/noPassthrough/read_only_allow_disk_use.js
+++ b/jstests/noPassthrough/read_only_allow_disk_use.js
@@ -9,11 +9,7 @@
* requires_replication
* ]
*/
-
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const memoryLimitMb = 1;
const memoryLimitBytes = 1 * 1024 * 1024;
@@ -159,4 +155,3 @@ runTest(connRecoverStandalone, true);
runTest(connRecoverStandalone, false);
MongoRunner.stopMongod(connRecoverStandalone);
-})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js b/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js
index 702ddd95c1186..535fa60f924cf 100644
--- a/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js
+++ b/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js
@@ -21,13 +21,13 @@
* arriving reads are serviced without deadlocking.
* queuedLongReadsFunc - Issues long read commands until told to stop.
* newLongReadsFunc - When told to begin, issues long read commands until told
- to stop.
+ * to stop.
*
* Test Steps:
* 0) Start ReplSet with special params:
* - lower read ticket concurrency
* - increase yielding
- * 1) Insert 100 documents.
+ * 1) Insert 1000 documents.
* 2) Kick off parallel readers that perform long collection scans, subject to yields.
* 3) Sleep with global X Lock (including RSTL), thus queuing up reads.
* 4) Signal new readers that will be received after the global lock is released.
@@ -36,21 +36,26 @@
* <>
* 6) Stop Readers.
*
- * @tags: [multiversion_incompatible]
+ * @tags: [
+ * multiversion_incompatible,
+ * requires_replication,
+ * requires_wiredtiger,
+ * ]
*/
(function() {
"use strict";
load("jstests/libs/parallel_shell_helpers.js");
+const kNumReadTickets = 5;
const replTest = new ReplSetTest({
name: jsTestName(),
nodes: 1,
nodeOptions: {
setParameter: {
// This test seeks the minimum amount of concurrency to force ticket exhaustion.
- storageEngineConcurrencyAdjustmentAlgorithm: "",
- storageEngineConcurrentReadTransactions: 5,
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions",
+ storageEngineConcurrentReadTransactions: kNumReadTickets,
// Make yielding more common.
internalQueryExecYieldPeriodMS: 1,
internalQueryExecYieldIterations: 1
@@ -126,17 +131,11 @@ function runStepDown() {
let stats = db.runCommand({serverStatus: 1});
jsTestLog(stats.locks);
jsTestLog(stats.wiredTiger.concurrentTransactions);
- const stepDownSecs = 5;
- assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": stepDownSecs, "force": true}));
-
- // Wait until the primary transitioned to SECONDARY state.
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Enforce the replSetStepDown timer.
- sleep(stepDownSecs * 1000);
-
- replTest.waitForState(primary, ReplSetTest.State.PRIMARY);
- replTest.getPrimary();
+ // Force primary to step down, then unfreeze and allow it to step up.
+ assert.commandWorked(
+ primaryAdmin.runCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true}));
+ assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0}));
+ return replTest.getPrimary();
}
/****************************************************/
@@ -153,12 +152,12 @@ let primaryColl = db[collName];
let queuedReaders = [];
let newReaders = [];
-// 1) Insert 100 documents.
-jsTestLog("Fill collection [" + dbName + "." + collName + "] with 100 docs");
-for (let i = 0; i < 100; i++) {
+// 1) Insert 1000 documents.
+jsTestLog("Fill collection [" + dbName + "." + collName + "] with 1000 docs");
+for (let i = 0; i < 1000; i++) {
assert.commandWorked(primaryColl.insert({"x": i}));
}
-jsTestLog("100 inserts done");
+jsTestLog("1000 inserts done");
// 2) Kick off parallel readers that perform long collection scans, subject to yields.
for (let i = 0; i < nQueuedReaders; i++) {
@@ -192,9 +191,10 @@ assert.soon(
() => db.getSiblingDB("admin")
.aggregate([{$currentOp: {}}, {$match: {"command.aggregate": TestData.collName}}])
.toArray()
- .length > 5,
+ .length >= kNumReadTickets,
"Expected more readers than read tickets.");
-runStepDown();
+
+primary = runStepDown();
// 6) Stop Readers.
jsTestLog("Stopping Readers");
diff --git a/jstests/noPassthrough/reconfig_restarts_collection_scan.js b/jstests/noPassthrough/reconfig_restarts_collection_scan.js
index 4d38c2750cda6..9e6c0c638c3b1 100644
--- a/jstests/noPassthrough/reconfig_restarts_collection_scan.js
+++ b/jstests/noPassthrough/reconfig_restarts_collection_scan.js
@@ -74,4 +74,4 @@ awaitCreateIndex();
IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/resize_tickets.js b/jstests/noPassthrough/resize_tickets.js
index b5d6f4af66f6f..5603ad01095b3 100644
--- a/jstests/noPassthrough/resize_tickets.js
+++ b/jstests/noPassthrough/resize_tickets.js
@@ -7,11 +7,9 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+jsTestLog("Start a replica set with execution control enabled by default");
let replTest = new ReplSetTest({
name: jsTestName(),
nodes: 1,
@@ -21,6 +19,7 @@ replTest.initiate();
let mongod = replTest.getPrimary();
// TODO (SERVER-67104): Remove the feature flag check.
if (FeatureFlagUtil.isPresentAndEnabled(mongod, 'ExecutionControl')) {
+ // Users cannot manually adjust read/write tickets once execution control is enabled at startup.
assert.commandFailedWithCode(
mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 10}),
ErrorCodes.IllegalOperation);
@@ -30,14 +29,48 @@ if (FeatureFlagUtil.isPresentAndEnabled(mongod, 'ExecutionControl')) {
}
replTest.stopSet();
+const gfixedConcurrentTransactions = "fixedConcurrentTransactions";
+jsTestLog("Start a replica set with execution control explicitly disabled on startup");
+replTest = new ReplSetTest({
+ name: jsTestName(),
+ nodes: 1,
+ nodeOptions: {
+ // Users can opt out of execution control by specifying the 'fixedConcurrentTransactions'
+ // option on startup.
+ setParameter: {storageEngineConcurrencyAdjustmentAlgorithm: gfixedConcurrentTransactions}
+ },
+});
+replTest.startSet();
+replTest.initiate();
+mongod = replTest.getPrimary();
+
+assert.commandWorked(
+ mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 20}));
+assert.commandWorked(
+ mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 20}));
+replTest.stopSet();
+
+jsTestLog("Start a replica set with execution control implicitly disabled on startup");
replTest = new ReplSetTest({
name: jsTestName(),
nodes: 1,
- nodeOptions: {setParameter: {storageEngineConcurrencyAdjustmentAlgorithm: ""}},
+ nodeOptions: {
+ // If a user manually sets read/write tickets on startup, implicitly set the
+ // 'storageEngineConcurrencyAdjustmentAlgorithm' parameter to 'fixedConcurrentTransactions'
+ // and disable execution control.
+ setParameter: {wiredTigerConcurrentReadTransactions: 20}
+ },
});
replTest.startSet();
replTest.initiate();
mongod = replTest.getPrimary();
+
+const getParameterResult =
+ mongod.adminCommand({getParameter: 1, storageEngineConcurrencyAdjustmentAlgorithm: 1});
+assert.commandWorked(getParameterResult);
+assert.eq(getParameterResult.storageEngineConcurrencyAdjustmentAlgorithm,
+ gfixedConcurrentTransactions);
+
// The 20, 10, 30 sequence of ticket resizes are just arbitrary numbers in order to test a decrease
// (20 -> 10) and an increase (10 -> 30) of tickets.
assert.commandWorked(
@@ -52,5 +85,4 @@ assert.commandWorked(
mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 10}));
assert.commandWorked(
mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 30}));
-replTest.stopSet();
-}());
+replTest.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/restart_index_build_if_resume_fails.js b/jstests/noPassthrough/restart_index_build_if_resume_fails.js
index bcd1e3a50ce6d..5cce87d4f7d1a 100644
--- a/jstests/noPassthrough/restart_index_build_if_resume_fails.js
+++ b/jstests/noPassthrough/restart_index_build_if_resume_fails.js
@@ -8,12 +8,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
const collName = jsTestName();
@@ -107,5 +104,4 @@ if (columnstoreEnabled) {
[{a: 24}, {a: 25}]);
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
index 46f5fd8d80a2b..593c556eaafab 100644
--- a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
+++ b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
@@ -9,12 +9,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
const collName = jsTestName();
@@ -91,5 +88,4 @@ if (columnstoreEnabled) {
}),
[{a: 99}, {a: 100}]);
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
index 147c4e4281ea8..cc34310beb07d 100644
--- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
@@ -8,12 +8,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -68,5 +65,4 @@ if (columnstoreEnabled) {
runTests(testDocs, [{"$**": "columnstore"}, {b: 1}], "_columnstore");
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
index 8cbdcb18268b1..86b171fdecb17 100644
--- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
+++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
@@ -9,12 +9,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -57,5 +54,4 @@ if (columnstoreEnabled) {
["bulk load"],
[{skippedPhaseLogID: 20391}]);
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
index c94db0b2ee7ea..58f6d49b5da81 100644
--- a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
+++ b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
@@ -8,12 +8,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -63,5 +60,4 @@ ResumableIndexBuildTest.run(
const files = listFiles(tmpDir);
assert.eq(files.length, 0, files);
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
index 49ec48f5ced6a..54d1549a93833 100644
--- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
@@ -9,12 +9,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -64,5 +61,4 @@ if (columnstoreEnabled) {
runTests([{a: 1, b: 1}, {a: 2, b: 2}], [{"$**": "columnstore"}, {b: 1}], "_columnstore");
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
index d1dd867cb3ea0..c59ac0784620f 100644
--- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
+++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
@@ -9,12 +9,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -70,5 +67,4 @@ if (columnstoreEnabled) {
["collection scan"],
[{numScannedAfterResume: numDocuments - maxIndexBuildMemoryUsageMB}]);
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
index 485abf0a758b2..070b6218c7a26 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js
@@ -8,11 +8,8 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const dbName = "test";
@@ -85,5 +82,4 @@ if (columnstoreEnabled) {
}),
"_subdocument_columnstore");
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
index 52def8c8bde8e..a365ba911a836 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
@@ -9,11 +9,8 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const dbName = "test";
const collName = jsTestName();
@@ -158,5 +155,4 @@ if (columnstoreEnabled) {
[{a: 32}, {a: 33}]);
}
-rst.stopSet();
-})();
\ No newline at end of file
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
index cb859855d342f..a576fcecfa5a3 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
@@ -12,11 +12,8 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const dbName = "test";
const collName = jsTestName();
@@ -163,5 +160,4 @@ if (columnstoreEnabled) {
[{a: 28}, {a: 29}]);
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_initialized.js b/jstests/noPassthrough/resumable_index_build_initialized.js
index aa672b3260d27..dc771c7751d0a 100644
--- a/jstests/noPassthrough/resumable_index_build_initialized.js
+++ b/jstests/noPassthrough/resumable_index_build_initialized.js
@@ -9,12 +9,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -54,5 +51,4 @@ if (columnstoreEnabled) {
runTests({foo: 1, b: 10}, [{"$**": "columnstore"}, {b: 1}], "_columnstore");
}
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_index_build_mixed_phases.js b/jstests/noPassthrough/resumable_index_build_mixed_phases.js
index 463d481d5e21e..81212f59e42cc 100644
--- a/jstests/noPassthrough/resumable_index_build_mixed_phases.js
+++ b/jstests/noPassthrough/resumable_index_build_mixed_phases.js
@@ -8,12 +8,9 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
load("jstests/noPassthrough/libs/index_build.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const dbName = "test";
@@ -115,5 +112,4 @@ runTests(
],
["bulk load", "drain writes"],
[{skippedPhaseLogID: 20391}, {skippedPhaseLogID: 20392}]);
-rst.stopSet();
-})();
\ No newline at end of file
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js
index 749735e62f0cd..66f46afea909d 100644
--- a/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js
+++ b/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js
@@ -9,10 +9,7 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/noPassthrough/libs/index_build.js");
const rst = new ReplSetTest({nodes: 1});
@@ -59,4 +56,3 @@ ResumableIndexBuildTest.run(
[{numScannedAfterResume: 2}]);
rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/retry_network_error_test.js b/jstests/noPassthrough/retry_network_error_test.js
index e8fe4a78047f2..25651027a836e 100644
--- a/jstests/noPassthrough/retry_network_error_test.js
+++ b/jstests/noPassthrough/retry_network_error_test.js
@@ -43,4 +43,4 @@ try {
jsTestLog("Caught exception after exhausting retries: " + e);
}
assert.eq(attempts, numRetries + 1);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/rolling_index_builds_interrupted.js b/jstests/noPassthrough/rolling_index_builds_interrupted.js
index d98bc5cdeb94e..86a966907ce47 100644
--- a/jstests/noPassthrough/rolling_index_builds_interrupted.js
+++ b/jstests/noPassthrough/rolling_index_builds_interrupted.js
@@ -50,7 +50,6 @@ IndexBuildTest.buildIndexOnNodeAsStandalone(
replTest.awaitNodesAgreeOnPrimary(
replTest.kDefaultTimeoutMS, replTest.nodes, replTest.getNodeId(primary));
-// TODO(SERVER-71768): fix the index build stall.
jsTestLog('Build index on the primary as part of the replica set: ' + primary.host);
let createIdx = IndexBuildTest.startIndexBuild(
primary, primaryColl.getFullName(), {x: 1}, {name: 'x_1'}, [ErrorCodes.Interrupted]);
@@ -81,7 +80,6 @@ assert.commandWorked(primaryDB.killOp(opId));
createIdx();
-// TODO(SERVER-71768): Check dbHash.
TestData.skipCheckDBHashes = true;
replTest.stopSet();
}());
diff --git a/jstests/noPassthrough/router_transactions_metrics.js b/jstests/noPassthrough/router_transactions_metrics.js
index 21677c30dfa08..77bf9005b6b6a 100644
--- a/jstests/noPassthrough/router_transactions_metrics.js
+++ b/jstests/noPassthrough/router_transactions_metrics.js
@@ -1,7 +1,6 @@
// Tests multi-statement transactions metrics in the serverStatus output from mongos in various
// basic cases.
// @tags: [
-// requires_fcv_70,
// uses_multi_shard_transaction,
// uses_transactions,
// ]
diff --git a/jstests/noPassthrough/sample_pushdown_transaction.js b/jstests/noPassthrough/sample_pushdown_transaction.js
index 21a85f5da1b6e..8ae60aa0dddc9 100644
--- a/jstests/noPassthrough/sample_pushdown_transaction.js
+++ b/jstests/noPassthrough/sample_pushdown_transaction.js
@@ -5,10 +5,7 @@
* Requires random cursor support.
* @tags: [requires_replication]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/analyze_plan.js'); // For planHasStage.
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
// Set up.
const rst = new ReplSetTest({nodes: 1});
@@ -48,5 +45,4 @@ assert.gt(randDocs.length, 0, tojson(randDocs));
// Clean up.
assert.commandWorked(session.abortTransaction_forTesting());
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js b/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js
index 5af0867d12446..65359760c5dbb 100644
--- a/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js
+++ b/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js
@@ -5,10 +5,7 @@
* Requires random cursor support.
* @tags: [requires_replication]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/analyze_plan.js'); // For planHasStage.
+import {aggPlanHasStage} from "jstests/libs/analyze_plan.js";
const numDocs = 1000;
const sampleSize = numDocs * .06;
@@ -99,5 +96,4 @@ const pipeline = [{$sample: {size: sampleSize}}, {$match: {a: {$gte: 0}}}];
})();
// // Clean up.
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
index 0be140981edcd..3c147be29c441 100644
--- a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
+++ b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
@@ -3,10 +3,7 @@
* demonstrates that unlike the classic multiplanner, the SBE multiplanner's end condition is by
* default not proportional to the size of the collection.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const numDocs = 1000;
const dbName = "sbe_multiplanner_db";
@@ -28,7 +25,7 @@ const db = conn.getDB(dbName);
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
const coll = db[collName];
@@ -119,5 +116,4 @@ assert.commandWorked(db.adminCommand({setParameter: 1, [collFracKnobSbe]: defaul
allPlans = getAllPlansExecution("2");
verifySbeNumReads(allPlans, trialLengthFromCollFrac);
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/sbe_plan_cache_api_version.js b/jstests/noPassthrough/sbe_plan_cache_api_version.js
index 2a3a0d184e3fa..431cb01c345e7 100644
--- a/jstests/noPassthrough/sbe_plan_cache_api_version.js
+++ b/jstests/noPassthrough/sbe_plan_cache_api_version.js
@@ -7,11 +7,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js");
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
@@ -145,5 +141,4 @@ for (const testcase of testcases) {
});
}
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
index 09b94049c97dc..622fc4d1c99a0 100644
--- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
+++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
@@ -3,14 +3,11 @@
* cache is cleared.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js");
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// Lists the names of the setParameters which should result in the SBE plan cache being cleared when
// the parameter is modified. Along with each parameter, includes a valid new value of the parameter
@@ -48,6 +45,15 @@ const paramList = [
{name: "internalQueryColumnScanMinAvgDocSizeBytes", value: 2048},
{name: "internalQueryColumnScanMinCollectionSizeBytes", value: 2048},
{name: "internalQueryColumnScanMinNumColumnFilters", value: 5},
+ {name: "internalQueryCardinalityEstimatorMode", value: "sampling"},
+ {name: "internalCascadesOptimizerDisableScan", value: true},
+ {name: "internalCascadesOptimizerDisableIndexes", value: true},
+ {name: "internalCascadesOptimizerDisableMergeJoinRIDIntersect", value: true},
+ {name: "internalCascadesOptimizerDisableHashJoinRIDIntersect", value: true},
+ {name: "internalCascadesOptimizerDisableGroupByAndUnionRIDIntersect", value: true},
+ {name: "internalCascadesOptimizerFastIndexNullHandling", value: true},
+ {name: "internalCascadesOptimizerMinIndexEqPrefixes", value: 2},
+ {name: "internalCascadesOptimizerMaxIndexEqPrefixes", value: 2},
];
const conn = MongoRunner.runMongod();
@@ -60,7 +66,7 @@ const db = conn.getDB(dbName);
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(db.dropDatabase());
@@ -106,4 +112,3 @@ for (let param of paramList) {
}
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
index 0cf0546a6bd18..06e68b4286103 100644
--- a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
+++ b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
@@ -7,13 +7,10 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
load("jstests/libs/log.js");
load("jstests/libs/profiler.js");
-load("jstests/libs/sbe_util.js");
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
@@ -23,7 +20,7 @@ const coll = db.coll;
if (!checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
assert.commandWorked(db.createCollection(coll.getName()));
@@ -224,4 +221,3 @@ function assertQueryHashAndPlanCacheKey(sbe, classic) {
})();
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
index d07a44560026d..3194fa69e88b7 100644
--- a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
@@ -6,14 +6,11 @@
* stripping debug info even though the size of the classic cache may be below the threshold.
* @tags: [
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
@@ -22,7 +19,7 @@ const db = conn.getDB("sbe_plan_cache_memory_debug_info");
if (!checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
function createTestCollection(collectionName) {
@@ -103,4 +100,3 @@ assert.eq(0, classicColl.find({a: 2, b: 4}).itcount());
assertCacheEntryIsMissingDebugInfo(classicColl, {a: 2, b: 4});
MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/sbe_plan_cache_size_metric.js b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
index 102a971727097..9692e6864f45e 100644
--- a/jstests/noPassthrough/sbe_plan_cache_size_metric.js
+++ b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
@@ -9,15 +9,12 @@
* assumes_balancer_off,
* does_not_support_stepdowns,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
-load("jstests/libs/analyze_plan.js"); // For 'getQueryHashFromExplain()'.
+import {getQueryHashFromExplain} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
assert.neq(conn, null, "mongod failed to start");
@@ -26,7 +23,7 @@ const db = conn.getDB("sbe_plan_cache_size_metric");
if (!checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
function getCacheEntriesByQueryHashKey(coll, queryHash) {
@@ -108,5 +105,4 @@ assert.commandWorked(db.runCommand({planCacheClear: collectionName, query: sbeQu
// Assert metric is decremented back to initial value.
assert.eq(initialPlanCacheSize, getPlanCacheSize());
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
index c1fef519d4acd..8869b0dbb4fc3 100644
--- a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
+++ b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js
@@ -1,9 +1,6 @@
// Test server parameter behavior upon FCV downgrade/upgrade.
-(function() {
-'use strict';
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
function assertParamExistenceInGetParamStar(output, param, expected) {
if (output.hasOwnProperty('clusterParameters')) {
@@ -193,5 +190,4 @@ function runDowngradeUpgradeTestForCWSP(conn, isMongod, isStandalone, verifyStat
s.s0, false /* isMongod */, false /* isStandalone */, verifyParameterState);
s.stop();
jsTest.log('END sharding');
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/noPassthrough/server_status_change_stream_metrics.js b/jstests/noPassthrough/server_status_change_stream_metrics.js
new file mode 100644
index 0000000000000..fd9feb89a1d3e
--- /dev/null
+++ b/jstests/noPassthrough/server_status_change_stream_metrics.js
@@ -0,0 +1,42 @@
+/**
+ * Tests for serverStatus metrics about change streams.
+ */
+(function() {
+"use strict";
+
+function getChangeStreamMetrics(db) {
+ const metrics = db.serverStatus().metrics;
+ return {
+ total: metrics.aggStageCounters["$changeStream"],
+ withExpandedEvents: metrics.changeStreams.showExpandedEvents,
+ };
+}
+
+function checkChangeStreamMetrics(db, expectedTotal, expectedWithExpandedEvents) {
+ const metrics = getChangeStreamMetrics(db);
+ assert.eq(expectedTotal, metrics.total);
+ assert.eq(expectedWithExpandedEvents, metrics.withExpandedEvents);
+}
+
+const rst = new ReplSetTest({name: jsTest.name(), nodes: 1});
+rst.startSet();
+rst.initiate();
+const db = rst.getPrimary().getDB(jsTest.name());
+const coll = db.getCollection(jsTest.name());
+
+checkChangeStreamMetrics(db, 0, 0);
+
+db.coll.aggregate([{$changeStream: {}}]);
+checkChangeStreamMetrics(db, 1, 0);
+
+db.coll.aggregate([{$changeStream: {showExpandedEvents: true}}]);
+checkChangeStreamMetrics(db, 2, 1);
+
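+// Explain invocations of $changeStream pipelines increment these metrics as well, as asserted
+// below.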
+db.coll.explain().aggregate([{$changeStream: {}}]);
+checkChangeStreamMetrics(db, 3, 1);
+
+db.coll.explain().aggregate([{$changeStream: {showExpandedEvents: true}}]);
+checkChangeStreamMetrics(db, 4, 2);
+
+rst.stopSet();
+}());
diff --git a/jstests/noPassthrough/server_status_metrics_hello_command.js b/jstests/noPassthrough/server_status_metrics_hello_command.js
index 8aacfedae2f06..1e06f8f1060f1 100644
--- a/jstests/noPassthrough/server_status_metrics_hello_command.js
+++ b/jstests/noPassthrough/server_status_metrics_hello_command.js
@@ -8,37 +8,40 @@ const mongod = MongoRunner.runMongod();
const dbName = "server_status_metrics_hello_command";
const db = mongod.getDB(dbName);
let serverStatusMetrics = db.serverStatus().metrics.commands;
-const initialIsMasterTotal = serverStatusMetrics.isMaster.total;
-const initialHelloTotal = 0;
+
+function getCommandCount(cmdName) {
+ return serverStatusMetrics.hasOwnProperty(cmdName) ? serverStatusMetrics[cmdName].total : 0;
+}
+
+let currentIsMasterTotal = getCommandCount("isMaster");
+let currentHelloTotal = getCommandCount("hello");
// Running hello command.
jsTestLog("Running hello command");
assert.commandWorked(db.runCommand({hello: 1}));
serverStatusMetrics = db.serverStatus().metrics.commands;
+assert.eq(getCommandCount("hello"), currentHelloTotal + 1, "commands.hello should increment");
+++currentHelloTotal;
assert.eq(
- serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should increment");
-assert.eq(serverStatusMetrics.isMaster.total,
- initialIsMasterTotal,
- "commands.isMaster should not increment");
+ getCommandCount("isMaster"), currentIsMasterTotal, "commands.isMaster should not increment");
// Running isMaster command.
jsTestLog("Running isMaster command");
assert.commandWorked(db.runCommand({isMaster: 1}));
serverStatusMetrics = db.serverStatus().metrics.commands;
+assert.eq(getCommandCount("hello"), currentHelloTotal, "commands.hello should not increment");
assert.eq(
- serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should not increment");
-assert.eq(serverStatusMetrics.isMaster.total,
- initialIsMasterTotal + 1,
- "commands.isMaster should increment");
+ getCommandCount("isMaster"), currentIsMasterTotal + 1, "commands.isMaster should increment");
+++currentIsMasterTotal;
// Running ismaster command.
jsTestLog("Running ismaster command");
assert.commandWorked(db.runCommand({ismaster: 1}));
serverStatusMetrics = db.serverStatus().metrics.commands;
+assert.eq(getCommandCount("hello"), currentHelloTotal, "commands.hello should not increment");
assert.eq(
- serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should not increment");
-assert.eq(serverStatusMetrics.isMaster.total,
- initialIsMasterTotal + 2,
- "commands.isMaster should increment");
+ getCommandCount("isMaster"), currentIsMasterTotal + 1, "commands.isMaster should increment");
+++currentIsMasterTotal;
+
MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/noPassthrough/server_status_multiplanner.js b/jstests/noPassthrough/server_status_multiplanner.js
index db4c528f031ff..0f06a32ba37b6 100644
--- a/jstests/noPassthrough/server_status_multiplanner.js
+++ b/jstests/noPassthrough/server_status_multiplanner.js
@@ -1,9 +1,6 @@
/**
* Tests the serverStatus and FTDC metrics for multi planner execution (both classic and SBE).
*/
-(function() {
-"use strict";
-
function sumHistogramBucketCounts(histogram) {
let sum = 0;
for (const bucket of histogram) {
@@ -13,7 +10,7 @@ function sumHistogramBucketCounts(histogram) {
}
load("jstests/libs/ftdc.js");
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const collName = jsTestName();
const dbName = jsTestName();
@@ -27,7 +24,7 @@ const db = conn.getDB(dbName);
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
let coll = db.getCollection(collName);
@@ -109,5 +106,4 @@ assert.soon(() => {
return true;
}, "FTDC output should eventually reflect observed serverStatus metrics.");
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/server_transaction_metrics_secondary.js b/jstests/noPassthrough/server_transaction_metrics_secondary.js
index a6a03f760f5c0..de936fb4ee449 100644
--- a/jstests/noPassthrough/server_transaction_metrics_secondary.js
+++ b/jstests/noPassthrough/server_transaction_metrics_secondary.js
@@ -1,6 +1,8 @@
// Test that transactions run on secondaries do not change the serverStatus transaction metrics.
// @tags: [
// uses_transactions,
+// # Tests running with experimental CQF behavior require test commands to be enabled.
+// cqf_experimental_incompatible,
// ]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js b/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js
new file mode 100644
index 0000000000000..9275e18f12d32
--- /dev/null
+++ b/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js
@@ -0,0 +1,34 @@
+// Verify that setParameter parameters which are an alias to a config parameter do not have the value
+// passed with setParameter as a startup argument overwritten by the config default.
+
+(function() {
+'use strict';
+
+const defaultsConn = MongoRunner.runMongod();
+function getDefaultValue(parameterName) {
+ const res =
+ assert.commandWorked(defaultsConn.adminCommand({getParameter: 1, [parameterName]: 1}));
+ return res[parameterName];
+}
+
+let paramsDict = {};
+const parameters = ['journalCommitInterval', 'syncdelay'];
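+// Set each parameter to one above its current default so the test can detect whether the
+// explicitly passed value is later overwritten by the config default.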
+parameters.forEach(param => {
+ const defaultValue = getDefaultValue(param);
+ const setValue = defaultValue + 1;
+ paramsDict[param] = setValue;
+});
+MongoRunner.stopMongod(defaultsConn);
+
+function runTestOnConn(conn, setParams) {
+ Object.keys(setParams).forEach(param => {
+ const res = assert.commandWorked(conn.adminCommand({getParameter: 1, [param]: 1}));
+ assert.eq(res[param], setParams[param]);
+ });
+}
+
+// Run the test on a standalone mongod.
+const standaloneConn = MongoRunner.runMongod({setParameter: paramsDict});
+runTestOnConn(standaloneConn, paramsDict);
+MongoRunner.stopMongod(standaloneConn);
+}());
diff --git a/jstests/noPassthrough/shard_filtering.js b/jstests/noPassthrough/shard_filtering.js
index 0883b2365d7ce..eb0beca6abc1c 100644
--- a/jstests/noPassthrough/shard_filtering.js
+++ b/jstests/noPassthrough/shard_filtering.js
@@ -5,13 +5,10 @@
* @tags: [
* requires_sharding,
* # TODO SERVER-71169: Implement shard filtering for CQF.
- * cqf_incompatible,
+ * cqf_experimental_incompatible,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {isIndexOnly, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js";
// Deliberately inserts orphans outside of migration.
TestData.skipCheckOrphans = true;
@@ -149,5 +146,4 @@ assert.sameMembers(
mongosColl.find({$or: [{a: 0, b: 0}, {a: 25, b: 0}]}, {_id: 0, a: 1, b: 1}).toArray(),
[{a: 0, b: 0}]);
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/shard_router_handle_staleconfig.js b/jstests/noPassthrough/shard_router_handle_staleconfig.js
new file mode 100644
index 0000000000000..3804afa4b6cd7
--- /dev/null
+++ b/jstests/noPassthrough/shard_router_handle_staleconfig.js
@@ -0,0 +1,68 @@
+/**
+ * Tests that mongos can detect stale routing information before checking for UUID mismatches and
+ * redirect the request to the appropriate shard.
+ *
+ * @tags: [requires_sharding]
+ */
+(function() {
+"use strict";
+const st = new ShardingTest({shards: 2, mongos: 2});
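+// Two routers are used so that one can retain stale routing information while the collection is
+// dropped and recreated through the other.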
+const dbName = "db";
+
+function checkCommand(cmd, collName, withinTransaction) {
+ const db = st.getDB(dbName);
+ const coll = db[collName];
+ coll.drop();
+
+ // Create a sharded collection and move it to the secondary shard.
+ assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+ const nonPrimaryShard = st.getOther(st.getPrimaryShard(dbName)).name;
+ assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: `${dbName}.${collName}`, find: {a: 0}, to: nonPrimaryShard}));
+ // We now proceed to insert one document on each mongos connection. This will register cache
+ // information about where to route the requests to that particular shard key.
+ let i = 0;
+ st.forEachMongos(mongos => {
+ mongos.getDB(dbName)[collName].insert({a: 0, x: i++});
+ });
+
+ let session;
+ if (withinTransaction) {
+ session = st.s1.getDB(dbName).getMongo().startSession();
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ }
+ // Drop and recreate the collection on the primary shard. Now the collection resides on the
+ // primary shard rather than the secondary. Note that we are only doing this in one mongos so
+ // that the other one has stale information.
+ const sDb = st.s0.getDB(dbName);
+ assert.commandWorked(sDb.runCommand({drop: coll.getName()}));
+ assert.commandWorked(sDb.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
+ const newUuid = sDb.getCollectionInfos({name: coll.getName()})[0].info.uuid;
+
+ // Proceed to make a request on the other mongos for the new collection. We expect this request
+ // to get sent to the wrong shard as the router is stale. mongos should detect this and retry
+ // the request with the correct shard. No exception should be passed to the user in this case.
+ if (withinTransaction) {
+ const sessionColl = session.getDatabase(dbName).getCollection(collName);
+ assert.commandWorked(sessionColl.runCommand(Object.extend(cmd, {collectionUUID: newUuid})));
+ session.commitTransaction();
+ } else {
+ assert.commandWorked(st.s1.getDB(dbName)[collName].runCommand(
+ Object.extend(cmd, {collectionUUID: newUuid})));
+ }
+}
+
+let collName = jsTestName() + "_find";
+checkCommand({find: collName, filter: {}}, collName, false);
+checkCommand({find: collName, filter: {}}, collName, true);
+collName = jsTestName() + "_insert";
+checkCommand({insert: collName, documents: [{x: 1}]}, collName, false);
+checkCommand({insert: collName, documents: [{x: 1}]}, collName, true);
+collName = jsTestName() + "_agg";
+checkCommand(
+ {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}}, collName, false);
+checkCommand(
+ {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}}, collName, true);
+
+st.stop();
+})();
diff --git a/jstests/noPassthrough/sharded_distinct.js b/jstests/noPassthrough/sharded_distinct.js
index 4b50586e6fbf6..c57804cc20969 100644
--- a/jstests/noPassthrough/sharded_distinct.js
+++ b/jstests/noPassthrough/sharded_distinct.js
@@ -20,4 +20,4 @@ assert.commandFailed(coll.runCommand("distinct", {help: helpFn, foo: 1}));
assert.commandFailed(coll.runCommand(
{explain: {distinct: coll.getName(), help: helpFn, foo: 1}, verbosity: 'queryPlanner'}));
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js b/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js
index 042916e6f8061..de765e9df26ca 100644
--- a/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js
+++ b/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js
@@ -9,10 +9,7 @@
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
const st = new ShardingTest({shards: 3, rs: {nodes: 1}});
const dbName = "test";
@@ -75,5 +72,4 @@ if (setUpServerForColumnStoreIndexTest(st.s.getDB(dbName))) {
assert.eq(catEntry.columnstoreProjection, kProjectionDoc, shardCatalogs);
}
}
-st.stop();
-})();
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js b/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js
deleted file mode 100644
index 0e1b77aff6259..0000000000000
--- a/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- * Tests that bucketing parameters are disallowed after downgrading to versions where the parameters
- * are not supported.
- */
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/catalog_shard_util.js");
-load("jstests/libs/fail_point_util.js");
-
-const dbName = 'testDB';
-const collName = 'testColl';
-const timeField = 'tm';
-const metaField = 'mt';
-
-const st = new ShardingTest({shards: 2});
-const mongos = st.s0;
-
-function useBucketingParametersOnLowerFCV() {
- const db = mongos.getDB(dbName);
- if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
- jsTestLog(
- "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- return;
- }
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st);
-
- let coll = db.getCollection(collName);
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {
- timeseries: {
- timeField: timeField,
- metaField: metaField,
- bucketMaxSpanSeconds: 60,
- bucketRoundingSeconds: 60
- }
- }));
-
- const configDirectDb = st.configRS.getPrimary().getDB(dbName);
- const configDirectColl = configDirectDb.getCollection(collName);
- if (isCatalogShardEnabled) {
- // Verify we cannot downgrade if the config server has a timeseries collection with
- // bucketing.
- assert.commandWorked(configDirectDb.createCollection(collName, {
- timeseries: {
- timeField: timeField,
- metaField: metaField,
- bucketMaxSpanSeconds: 60,
- bucketRoundingSeconds: 60
- }
- }));
- }
-
- // On the latestFCV, we should not be able to use collMod with incomplete bucketing parameters.
- assert.commandFailedWithCode(
- db.runCommand({collMod: collName, timeseries: {bucketMaxSpanSeconds: 3600}}),
- ErrorCodes.InvalidOptions);
-
- // We should fail to downgrade if we have a collection with custom bucketing parameters set.
- assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
- coll = db.getCollection(collName);
- coll.drop();
-
- if (isCatalogShardEnabled) {
- // We should still fail to downgrade if we have a collection on the config server with
- // custom bucketing parameters set.
- assert.commandFailedWithCode(
- mongos.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
- configDirectColl.drop();
- }
-
- // Successfully downgrade to latest FCV.
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
- // On the latest FCV, we should not be able to create a collection with bucketing parameters.
- assert.commandFailedWithCode(db.createCollection(collName, {
- timeseries: {
- timeField: timeField,
- metaField: metaField,
- bucketMaxSpanSeconds: 60,
- bucketRoundingSeconds: 60
- }
- }),
- ErrorCodes.InvalidOptions);
-
- assert.commandWorked(
- db.createCollection(collName, {timeseries: {timeField: timeField, metaField: metaField}}));
-
- // On the latest FCV we should not be able to use collMod with the bucketing parameters.
- assert.commandFailedWithCode(db.runCommand({
- collMod: collName,
- timeseries: {bucketMaxSpanSeconds: 3600, bucketRoundingSeconds: 3600}
- }),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- db.runCommand({collMod: collName, timeseries: {bucketMaxSpanSeconds: 3600}}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(
- db.runCommand({collMod: collName, timeseries: {bucketRoundingSeconds: 3600}}),
- ErrorCodes.InvalidOptions);
-
- // Verify the time-series options are valid.
- let collections = assert.commandWorked(db.runCommand({listCollections: 1})).cursor.firstBatch;
- let collectionEntry = collections.find(entry => entry.name === 'system.buckets.' + collName);
- assert(collectionEntry);
-
- assert.eq(collectionEntry.options.timeseries.granularity, "seconds");
- // Downgrading does not remove the 'bucketMaxSpanSeconds' parameter. It should correspond with
- // the "seconds" granularity.
- assert.eq(collectionEntry.options.timeseries.bucketMaxSpanSeconds, 3600);
- assert.isnull(collectionEntry.options.timeseries.bucketRoundingSeconds);
-}
-
-useBucketingParametersOnLowerFCV();
-
-st.stop();
-})();
diff --git a/jstests/noPassthrough/shell_bson_obj_to_array.js b/jstests/noPassthrough/shell_bson_obj_to_array.js
index bc49396c27bb2..af3a7b1f77f01 100644
--- a/jstests/noPassthrough/shell_bson_obj_to_array.js
+++ b/jstests/noPassthrough/shell_bson_obj_to_array.js
@@ -30,4 +30,4 @@ tests.forEach((test) => {
});
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js
index 8f092efcf25ec..3d1ae1214f60d 100644
--- a/jstests/noPassthrough/shell_can_use_read_concern.js
+++ b/jstests/noPassthrough/shell_can_use_read_concern.js
@@ -202,4 +202,4 @@ runTests({withSession: false});
runTests({withSession: true});
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/shutdown_with_fsync.js b/jstests/noPassthrough/shutdown_with_fsync.js
new file mode 100644
index 0000000000000..42bd97311b2ec
--- /dev/null
+++ b/jstests/noPassthrough/shutdown_with_fsync.js
@@ -0,0 +1,29 @@
+/**
+ * Tests that shutdown can succeed even if the server is fsync locked.
+ */
+
+(function() {
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(conn, null);
+
+const dbName = jsTestName();
+const collName = "testColl";
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+jsTestLog("Insert some data to create a collection.");
+assert.commandWorked(testColl.insert({x: 1}));
+
+jsTestLog("Set fsync lock to block server writes. Create some nesting for extra test coverage");
+testDB.fsyncLock();
+testDB.fsyncLock();
+
+jsTestLog("Check that the fsync lock is working: no writes should be possible.");
+assert.commandFailed(testDB.runCommand({insert: collName, documents: [{z: 1}], maxTimeMS: 30}));
+
+jsTestLog("Check that shutdown can succeed with an fsync lock: the fsync lock should be cleared.");
+// Skipping validation because the fsync lock causes the validate command to hang.
+MongoRunner.stopMongod(conn, null, {skipValidation: true});
+}());
diff --git a/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js b/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js
new file mode 100644
index 0000000000000..1065962b072a7
--- /dev/null
+++ b/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js
@@ -0,0 +1,61 @@
+/**
+ * Tests that storage stats reporting on slow query logging does not acquire the RSTL.
+ *
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/parallel_shell_helpers.js"); // startParallelShell
+load("jstests/libs/wait_for_command.js"); // waitForCommand
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB("test");
+const testCollection = testDB.getCollection("c");
+
+const fieldValue = "slow query logging reporting storage statistics";
+
+assert.commandWorked(testCollection.insertOne({a: fieldValue}));
+
+jsTestLog("Starting the sleep command in a parallel thread to take the RSTL MODE_X lock");
+let rstlXLockSleepJoin = startParallelShell(() => {
+ jsTestLog("Parallel Shell: about to start sleep command");
+ assert.commandFailedWithCode(db.adminCommand({
+ sleep: 1,
+ secs: 60 * 60,
+ // RSTL MODE_X lock.
+ lockTarget: "RSTL",
+ $comment: "RSTL lock sleep"
+ }),
+ ErrorCodes.Interrupted);
+}, testDB.getMongo().port);
+
+jsTestLog("Waiting for the sleep command to start and fetch the opID");
+const sleepCmdOpID =
+ waitForCommand("RSTL lock", op => (op["command"]["$comment"] == "RSTL lock sleep"), testDB);
+
+jsTestLog("Wait for the sleep command to log that the RSTL MODE_X lock was acquired");
+checkLog.containsJson(testDB, 6001600);
+
+try {
+ jsTestLog("Running the query while the RSTL is being held");
+
+ // Log any query regardless of its completion time.
+ assert.commandWorked(testDB.setProfilingLevel(0, -1));
+
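+    // The find below should complete and emit its 'Slow query' log line, including the storage
+    // section, even while the RSTL is held in MODE_X; if gathering storage stats required the
+    // RSTL, this query would hang instead.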
+ const loggedQuery = RegExp("Slow query.*\"find\":\"c\".*" + fieldValue + ".*\"storage\":{");
+ assert.eq(false, checkLog.checkContainsOnce(rst.getPrimary(), loggedQuery));
+ assert.eq(1, testCollection.find({a: fieldValue}).itcount());
+ assert.eq(true, checkLog.checkContainsOnce(rst.getPrimary(), loggedQuery));
+} finally {
+ jsTestLog("Ensure the sleep cmd releases the lock so that the server can shutdown");
+ assert.commandWorked(testDB.killOp(sleepCmdOpID)); // kill the sleep cmd
+ rstlXLockSleepJoin(); // wait for the thread running the sleep cmd to finish
+}
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/sort_spill_estimate_data_size.js b/jstests/noPassthrough/sort_spill_estimate_data_size.js
index 9f7fc62bac608..a0cd67dd039ef 100644
--- a/jstests/noPassthrough/sort_spill_estimate_data_size.js
+++ b/jstests/noPassthrough/sort_spill_estimate_data_size.js
@@ -3,9 +3,7 @@
*
* This test was originally designed to reproduce SERVER-53760.
*/
-(function() {
-"use strict";
-load('jstests/libs/analyze_plan.js'); // For 'getAggPlanStage()'.
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
@@ -69,5 +67,4 @@ assert.lt(dataBytesSorted, 3 * totalSize, explain);
assert.eq(createPipeline(coll).toArray(), [{_id: 0, sumTop900UniqueValues: 94550}], explain);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/spill_to_disk_secondary_read.js b/jstests/noPassthrough/spill_to_disk_secondary_read.js
index f143aa8852422..771fd5a630123 100644
--- a/jstests/noPassthrough/spill_to_disk_secondary_read.js
+++ b/jstests/noPassthrough/spill_to_disk_secondary_read.js
@@ -3,11 +3,8 @@
* writeConcern greater than w:1.
* @tags: [requires_replication, requires_majority_read_concern, requires_persistence]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const kNumNodes = 3;
const replTest = new ReplSetTest({
@@ -214,4 +211,3 @@ const readColl = secondary.getDB("test").foo;
})();
replTest.stopSet();
-})();
diff --git a/jstests/noPassthrough/ssl_cipher_default.js b/jstests/noPassthrough/ssl_cipher_default.js
index d1a6f6fd5a415..4b5f1a32e1e1f 100644
--- a/jstests/noPassthrough/ssl_cipher_default.js
+++ b/jstests/noPassthrough/ssl_cipher_default.js
@@ -46,4 +46,4 @@ assertCorrectConfig({
tlsCipherConfig: "HIGH"
},
"HIGH");
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/standalone_cluster_parameters.js b/jstests/noPassthrough/standalone_cluster_parameters.js
index 7ab53f704c539..6a962923cff6b 100644
--- a/jstests/noPassthrough/standalone_cluster_parameters.js
+++ b/jstests/noPassthrough/standalone_cluster_parameters.js
@@ -1,14 +1,15 @@
/**
* Checks that set/getClusterParameter run as expected on standalone.
* @tags: [
- * # Standalone cluster parameters enabled only under this flag.
- * featureFlagAuditConfigClusterParameter,
+ * # Standalone cluster parameters enabled in 7.1+.
+ * requires_fcv_71,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/cluster_server_parameter_utils.js');
+import {
+ setupNode,
+ testInvalidClusterParameterCommands,
+ testValidClusterParameterCommands,
+} from "jstests/libs/cluster_server_parameter_utils.js";
const conn = MongoRunner.runMongod({});
@@ -21,5 +22,4 @@ testInvalidClusterParameterCommands(conn);
// Then, ensure that set/getClusterParameter set and retrieve expected values.
testValidClusterParameterCommands(conn);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/start_up_with_custom_cost_model.js b/jstests/noPassthrough/start_up_with_custom_cost_model.js
index 4158eee4c94b1..6fda80e2193d5 100644
--- a/jstests/noPassthrough/start_up_with_custom_cost_model.js
+++ b/jstests/noPassthrough/start_up_with_custom_cost_model.js
@@ -1,10 +1,11 @@
/**
* Tests that 'internalCostModelCoefficients' can be set on startup.
*/
-(function() {
-'use strict';
-
-load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
+import {
+ assertValueOnPath,
+ checkCascadesOptimizerEnabled,
+ navigateToPlanPath
+} from "jstests/libs/optimizer_utils.js";
function getScanCostWith(customScanCost) {
const costStr = `{"scanIncrementalCost": ${customScanCost}}`;
@@ -51,8 +52,7 @@ function getScanCostWith(customScanCost) {
const scanCost1 = getScanCostWith(0.2);
const scanCost2 = getScanCostWith(0.4);
if (scanCost1 === undefined) {
- return;
+ quit();
}
-assert.lt(scanCost1, scanCost2);
-}());
+assert.lt(scanCost1, scanCost2);
\ No newline at end of file
diff --git a/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js b/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js
index fde47c8956d9e..cb05e40fdc8e5 100644
--- a/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js
+++ b/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js
@@ -1,6 +1,6 @@
/**
* Test that retryable findAndModify commands will store pre- and post- images in the appropriate
- * collections for `storeFindAndModifyImagesInSideCollection=true`.
+ * collections.
*
* @tags: [requires_replication]
*/
@@ -56,9 +56,6 @@ function assertRetryCommand(cmdResponse, retryResponse) {
}
function checkProfilingLogs(primary) {
- assert.commandWorked(
- primary.adminCommand({setParameter: 1, storeFindAndModifyImagesInSideCollection: true}));
-
let db = primary.getDB('for_profiling');
let configDB = primary.getDB('config');
assert.commandWorked(db.user.insert({_id: 1}));
@@ -98,9 +95,6 @@ function checkProfilingLogs(primary) {
}
function runTests(lsid, mainConn, primary, secondary, docId) {
- const setParam = {setParameter: 1, storeFindAndModifyImagesInSideCollection: true};
- primary.adminCommand(setParam);
-
let txnNumber = NumberLong(docId);
let incrementTxnNumber = function() {
txnNumber = NumberLong(txnNumber + 1);
diff --git a/jstests/noPassthrough/supports_read_concern_majority.js b/jstests/noPassthrough/supports_read_concern_majority.js
index 5bf5bba6fa869..dc8af14014d52 100644
--- a/jstests/noPassthrough/supports_read_concern_majority.js
+++ b/jstests/noPassthrough/supports_read_concern_majority.js
@@ -8,4 +8,4 @@ const conn = MongoRunner.runMongod({enableMajorityReadConcern: false});
assert(!conn);
const logContents = rawMongoProgramOutput();
assert(logContents.indexOf("enableMajorityReadConcern:false is no longer supported") > 0);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/telemetry/application_name_find.js b/jstests/noPassthrough/telemetry/application_name_find.js
deleted file mode 100644
index 13c70e6d70f2d..0000000000000
--- a/jstests/noPassthrough/telemetry/application_name_find.js
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Test that applicationName and namespace appear in telemetry for the find command.
- * @tags: [featureFlagTelemetry]
- */
-(function() {
-"use strict";
-
-const kApplicationName = "MongoDB Shell";
-const kHashedApplicationName = "dXRuJCwctavU";
-
-const getTelemetry = (conn, redactIdentifiers = false) => {
- const result = assert.commandWorked(conn.adminCommand({
- aggregate: 1,
- pipeline: [
- {$telemetry: {redactIdentifiers}},
- // Sort on telemetry key so entries are in a deterministic order.
- {$sort: {key: 1}},
- {$match: {"key.applicationName": {$in: [kApplicationName, kHashedApplicationName]}}},
- {$match: {"key.find": {$exists: true}}}
- ],
- cursor: {batchSize: 10}
- }));
- return result.cursor.firstBatch;
-};
-
-// Turn on the collecting of telemetry metrics.
-let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-};
-
-const conn = MongoRunner.runMongod(options);
-conn.setLogLevel(3, "query");
-const testDB = conn.getDB('test');
-var coll = testDB[jsTestName()];
-coll.drop();
-
-coll.insert({v: 1});
-coll.insert({v: 2});
-coll.insert({v: 3});
-
-coll.find({v: 1}).toArray();
-
-let telemetry = getTelemetry(conn);
-assert.eq(1, telemetry.length, telemetry);
-assert.eq({
- cmdNs: {db: testDB.getName(), coll: coll.getName()},
- find: coll.getName(),
- filter: {v: {"$eq": "?"}},
- applicationName: kApplicationName
-},
- telemetry[0].key,
- telemetry);
-
-telemetry = getTelemetry(conn, true);
-assert.eq(1, telemetry.length, telemetry);
-const hashedColl = "zF15YAUWbyIP";
-assert.eq({
- cmdNs: {db: "n4bQgYhMfWWa", coll: hashedColl},
- find: hashedColl,
- filter: {"TJRIXgwhrmxB": {"$eq": "?"}},
- applicationName: kHashedApplicationName
-},
- telemetry[0].key,
- telemetry);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/clear_telemetry_store.js b/jstests/noPassthrough/telemetry/clear_telemetry_store.js
deleted file mode 100644
index b2409cc0bbb60..0000000000000
--- a/jstests/noPassthrough/telemetry/clear_telemetry_store.js
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Test that the telemetry store can be cleared when the cache size is reset to 0.
- * @tags: [featureFlagTelemetry]
- */
-load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
-
-(function() {
-"use strict";
-
-// Turn on the collecting of telemetry metrics.
-let options = {
- setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- internalQueryConfigureTelemetryCacheSize: "10MB"
- },
-};
-
-const conn = MongoRunner.runMongod(options);
-const testDB = conn.getDB('test');
-var coll = testDB[jsTestName()];
-coll.drop();
-
-let query = {};
-for (var j = 0; j < 10; ++j) {
- query["foo.field.xyz." + j] = 1;
- query["bar.field.xyz." + j] = 2;
- query["baz.field.xyz." + j] = 3;
- coll.aggregate([{$match: query}]).itcount();
-}
-
-// Confirm number of entries in the store and that none have been evicted.
-let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
-assert.eq(telemetryResults.length, 10, telemetryResults);
-assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
-
-// Command to clear the cache.
-assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "0MB"}));
-
-// 10 regular queries plus the $telemetry query means 11 entries are evicted when the cache is cleared.
-assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 11);
-
-// Calling $telemetry should fail when the telemetry store size is 0 bytes.
-assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]), 6579000);
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js
deleted file mode 100644
index 8dee55a109bac..0000000000000
--- a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Test that calls to read from telemetry store fail when feature flag is turned off and sampling
- * rate > 0.
- */
-load('jstests/libs/analyze_plan.js');
-load("jstests/libs/feature_flag_util.js");
-
-(function() {
-"use strict";
-
-// Set sampling rate to -1.
-let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-};
-const conn = MongoRunner.runMongod(options);
-const testdb = conn.getDB('test');
-
-// This test specifically tests error handling when the feature flag is not on.
-// TODO SERVER-65800 This test can be deleted when the feature is on by default.
-if (!conn || FeatureFlagUtil.isEnabled(testdb, "Telemetry")) {
- jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`);
- if (conn) {
- MongoRunner.stopMongod(conn);
- }
- return;
-}
-
-var coll = testdb[jsTestName()];
-coll.drop();
-
-// Bulk insert documents to reduce roundtrips and make a timeout on a slow machine less likely.
-const bulk = coll.initializeUnorderedBulkOp();
-for (let i = 1; i <= 20; i++) {
- bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
-}
-assert.commandWorked(bulk.execute());
-
-// Pipeline to read telemetry store should fail without feature flag turned on even though sampling
-// rate is > 0.
-assert.commandFailedWithCode(
- testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
- ErrorCodes.QueryFeatureNotAllowed);
-
-// Pipeline, with a filter, to read telemetry store fails without feature flag turned on even though
-// sampling rate is > 0.
-assert.commandFailedWithCode(testdb.adminCommand({
- aggregate: 1,
- pipeline: [{$telemetry: {}}, {$match: {"key.find.find": {$eq: "###"}}}],
- cursor: {}
-}),
- ErrorCodes.QueryFeatureNotAllowed);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js b/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js
deleted file mode 100644
index ca2a9aa348d51..0000000000000
--- a/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Test that the $telemetry.redactionFieldNames parameter correctly sets the redaction strategy for
- * telemetry store keys.
- * @tags: [featureFlagTelemetry]
- */
-(function() {
-"use strict";
-
-load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains.
-
-// Turn on the collecting of telemetry metrics.
-let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-};
-
-const conn = MongoRunner.runMongod(options);
-const testDB = conn.getDB('test');
-var coll = testDB[jsTestName()];
-coll.drop();
-
-coll.aggregate([{$sort: {bar: -1}}, {$limit: 2}, {$match: {foo: {$lte: 2}}}]);
-// Default is no redaction.
-let telStore = assert.commandWorked(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
-assert.eq(telStore.cursor.firstBatch[0]["key"]["pipeline"],
- [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]);
-
-// Turning on redaction should redact field names on all entries, even previously cached ones.
-telStore = assert.commandWorked(testDB.adminCommand(
- {aggregate: 1, pipeline: [{$telemetry: {redactIdentifiers: true}}], cursor: {}}));
-telStore.cursor.firstBatch.forEach(element => {
- // Find the non-telemetry query and check its key to assert it matches requested redaction
- // strategy.
- if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) {
- assert.eq(telStore.cursor.firstBatch[0]["key"]["pipeline"], [
- {"$sort": {"/N4rLtula/QI": "###"}},
- {"$limit": "###"},
- {"$match": {"LCa0a2j/xo/5": {"TmACc7vp8cv6": "###"}}}
- ]);
- }
-});
-
-// Turning redaction back off should preserve field names on all entries, even previously cached
-// ones.
-telStore = assert.commandWorked(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
-telStore.cursor.firstBatch.forEach(element => {
- // Find the non-telemetry query and check its key to assert it matches requested redaction
- // strategy.
- if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) {
- assert.eq(
- telStore.cursor.firstBatch[0]["key"]["pipeline"],
- [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]);
- }
-});
-
-// Explicitly set redactIdentifiers to false.
-telStore = assert.commandWorked(testDB.adminCommand(
- {aggregate: 1, pipeline: [{$telemetry: {redactIdentifiers: false}}], cursor: {}}));
-telStore.cursor.firstBatch.forEach(element => {
- // Find the non-telemetry query and check its key to assert it matches requested redaction
- // strategy.
- if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) {
- assert.eq(
- telStore.cursor.firstBatch[0]["key"]["pipeline"],
- [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]);
- }
-});
-
-// Wrong parameter name throws error.
-let pipeline = [{$telemetry: {redactFields: true}}];
-assertAdminDBErrCodeAndErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.FailedToParse,
- "$telemetry parameters object may only contain 'redactIdentifiers' option. Found: redactFields");
-
-// Wrong parameter type throws error.
-pipeline = [{$telemetry: {redactIdentifiers: 1}}];
-assertAdminDBErrCodeAndErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.FailedToParse,
- "$telemetry redactIdentifiers parameter must be boolean. Found type: double");
-
-// Parameter object with unrecognized key throws error.
-pipeline = [{$telemetry: {redactIdentifiers: true, redactionStrategy: "on"}}];
-assertAdminDBErrCodeAndErrMsgContains(
- coll,
- pipeline,
- ErrorCodes.FailedToParse,
- "$telemetry parameters object may only contain one field, 'redactIdentifiers'. Found: { redactIdentifiers: true, redactionStrategy: \"on\" }");
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
deleted file mode 100644
index 5882ab529f00a..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js
+++ /dev/null
@@ -1,273 +0,0 @@
-/**
- * Test that mongos is collecting telemetry metrics.
- * @tags: [requires_fcv_70, featureFlagTelemetry]
- */
-
-load('jstests/libs/telemetry_utils.js');
-
-(function() {
-"use strict";
-
-// Redacted literal replacement string. This may change in the future, so it's factored out.
-const aggRedactString = "###";
-const findRedactString = "?";
-
-const setup = () => {
- const st = new ShardingTest({
- mongos: 1,
- shards: 1,
- config: 1,
- rs: {nodes: 1},
- mongosOptions: {
- setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
- }
- },
- });
- const mongos = st.s;
- const db = mongos.getDB("test");
- const coll = db.coll;
- coll.insert({v: 1});
- coll.insert({v: 4});
- return st;
-};
-
-const assertExpectedResults = (results,
- expectedTelemetryKey,
- expectedExecCount,
- expectedDocsReturnedSum,
- expectedDocsReturnedMax,
- expectedDocsReturnedMin,
- expectedDocsReturnedSumOfSq) => {
- const {key, metrics} = results;
- assert.eq(expectedTelemetryKey, key);
- assert.eq(expectedExecCount, metrics.execCount);
- assert.docEq({
- sum: NumberLong(expectedDocsReturnedSum),
- max: NumberLong(expectedDocsReturnedMax),
- min: NumberLong(expectedDocsReturnedMin),
- sumOfSquares: NumberLong(expectedDocsReturnedSumOfSq)
- },
- metrics.docsReturned);
-
- // This test can't predict exact timings, so just assert these three fields have been set (are
- // non-zero).
- const {firstSeenTimestamp, lastExecutionMicros, queryExecMicros} = metrics;
-
- assert.neq(timestampCmp(firstSeenTimestamp, Timestamp(0, 0)), 0);
- assert.neq(lastExecutionMicros, NumberLong(0));
-
- const distributionFields = ['sum', 'max', 'min', 'sumOfSquares'];
- for (const field of distributionFields) {
- assert.neq(queryExecMicros[field], NumberLong(0));
- }
-};
-
-// Assert that, for find queries, no telemetry results are written until a cursor has reached
-// exhaustion; ensure accurate results once they're written.
-{
- const st = setup();
- const db = st.s.getDB("test");
- const collName = "coll";
- const coll = db[collName];
-
- const telemetryKey = {
- cmdNs: {db: "test", coll: "coll"},
- find: collName,
- filter: {$and: [{v: {$gt: findRedactString}}, {v: {$lt: findRedactString}}]},
- batchSize: findRedactString,
- readConcern: {level: "local", provenance: "implicitDefault"},
- applicationName: "MongoDB Shell",
- };
-
- const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc
-
- // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written
- // yet.
- let telemetry = getTelemetry(db);
- assert.eq(0, telemetry.length);
-
- // Run a getMore to exhaust the cursor, then ensure telemetry results have been written
- // accurately. batchSize must be 2 so the cursor recognizes exhaustion.
- assert.commandWorked(db.runCommand({
- getMore: cursor.getId(),
- collection: coll.getName(),
- batchSize: 2
- })); // returns 1 doc, exhausts the cursor
- // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
- telemetry = getTelemetry(db);
- assert.eq(2, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 1,
- /* expectedDocsReturnedSum */ 2,
- /* expectedDocsReturnedMax */ 2,
- /* expectedDocsReturnedMin */ 2,
- /* expectedDocsReturnedSumOfSq */ 4);
-
- // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are
- // accurate.
- coll.find({v: {$gt: 2, $lt: 3}}).batchSize(10).toArray(); // returns 0 docs
- coll.find({v: {$gt: 0, $lt: 1}}).batchSize(10).toArray(); // returns 0 docs
- coll.find({v: {$gt: 0, $lt: 2}}).batchSize(10).toArray(); // return 1 doc
- telemetry = getTelemetry(db);
- assert.eq(2, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 4,
- /* expectedDocsReturnedSum */ 3,
- /* expectedDocsReturnedMax */ 2,
- /* expectedDocsReturnedMin */ 0,
- /* expectedDocsReturnedSumOfSq */ 5);
-
- st.stop();
-}
-
-// Assert that, for agg queries, no telemetry results are written until a cursor has reached
-// exhaustion; ensure accurate results once they're written.
-{
- const st = setup();
- const db = st.s.getDB("test");
- const coll = db.coll;
-
- const telemetryKey = {
- pipeline: [
- {$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}},
- {$project: {hello: aggRedactString}},
- ],
- namespace: "test.coll",
- applicationName: "MongoDB Shell"
- };
-
- const cursor = coll.aggregate(
- [
- {$match: {v: {$gt: 0, $lt: 5}}},
- {$project: {hello: "$world"}},
- ],
- {cursor: {batchSize: 1}}); // returns 1 doc
-
- // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written
- // yet.
- let telemetry = getTelemetry(db);
- assert.eq(0, telemetry.length);
-
- // Run a getMore to exhaust the cursor, then ensure telemetry results have been written
- // accurately. batchSize must be 2 so the cursor recognizes exhaustion.
- assert.commandWorked(db.runCommand({
- getMore: cursor.getId(),
- collection: coll.getName(),
- batchSize: 2
- })); // returns 1 doc, exhausts the cursor
- // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry.
- telemetry = getTelemetry(db);
- assert.eq(2, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 1,
- /* expectedDocsReturnedSum */ 2,
- /* expectedDocsReturnedMax */ 2,
- /* expectedDocsReturnedMin */ 2,
- /* expectedDocsReturnedSumOfSq */ 4);
-
- // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are
- // accurate.
- coll.aggregate([
- {$match: {v: {$gt: 0, $lt: 5}}},
- {$project: {hello: "$world"}},
- ]); // returns 2 docs
- coll.aggregate([
- {$match: {v: {$gt: 2, $lt: 3}}},
- {$project: {hello: "$universe"}},
- ]); // returns 0 docs
- coll.aggregate([
- {$match: {v: {$gt: 0, $lt: 2}}},
- {$project: {hello: "$galaxy"}},
- ]); // returns 1 doc
- telemetry = getTelemetry(db);
- assert.eq(2, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 4,
- /* expectedDocsReturnedSum */ 5,
- /* expectedDocsReturnedMax */ 2,
- /* expectedDocsReturnedMin */ 0,
- /* expectedDocsReturnedSumOfSq */ 9);
-
- st.stop();
-}
-
-// Assert on batchSize-limited find queries that killCursors will write metrics with partial results
-// to the telemetry store.
-{
- const st = setup();
- const db = st.s.getDB("test");
- const collName = "coll";
- const coll = db[collName];
-
- const telemetryKey = {
- cmdNs: {db: "test", coll: "coll"},
- find: collName,
- filter: {$and: [{v: {$gt: findRedactString}}, {v: {$lt: findRedactString}}]},
- batchSize: findRedactString,
- readConcern: {level: "local", provenance: "implicitDefault"},
- applicationName: "MongoDB Shell"
- };
-
- const cursor1 = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc
- const cursor2 = coll.find({v: {$gt: 0, $lt: 2}}).batchSize(1); // returns 1 doc
-
- assert.commandWorked(
- db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
-
- const telemetry = getTelemetry(db);
- assert.eq(1, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 2,
- /* expectedDocsReturnedSum */ 2,
- /* expectedDocsReturnedMax */ 1,
- /* expectedDocsReturnedMin */ 1,
- /* expectedDocsReturnedSumOfSq */ 2);
- st.stop();
-}
-
-// Assert on batchSize-limited agg queries that killCursors will write metrics with partial results
-// to the telemetry store.
-{
- const st = setup();
- const db = st.s.getDB("test");
- const coll = db.coll;
-
- const telemetryKey = {
- pipeline: [{$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}}],
- namespace: `test.${coll.getName()}`,
- applicationName: "MongoDB Shell"
- };
-
- const cursor1 = coll.aggregate(
- [
- {$match: {v: {$gt: 0, $lt: 5}}},
- ],
- {cursor: {batchSize: 1}}); // returns 1 doc
- const cursor2 = coll.aggregate(
- [
- {$match: {v: {$gt: 0, $lt: 2}}},
- ],
- {cursor: {batchSize: 1}}); // returns 1 doc
-
- assert.commandWorked(
- db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
-
- const telemetry = getTelemetry(db);
- assert.eq(1, telemetry.length);
- assertExpectedResults(telemetry[0],
- telemetryKey,
- /* expectedExecCount */ 2,
- /* expectedDocsReturnedSum */ 2,
- /* expectedDocsReturnedMax */ 1,
- /* expectedDocsReturnedMin */ 1,
- /* expectedDocsReturnedSumOfSq */ 2);
- st.stop();
-}
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js b/jstests/noPassthrough/telemetry/telemetry_feature_flag.js
deleted file mode 100644
index 04377ca661010..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Test that calls to read from telemetry store fail when feature flag is turned off.
- */
-load('jstests/libs/analyze_plan.js');
-load("jstests/libs/feature_flag_util.js");
-
-(function() {
-"use strict";
-
-// This test specifically tests error handling when the feature flag is not on.
-// TODO SERVER-65800 this test can be removed when the feature flag is removed.
-const conn = MongoRunner.runMongod();
-const testDB = conn.getDB('test');
-if (FeatureFlagUtil.isEnabled(testDB, "Telemetry")) {
- jsTestLog("Skipping test since telemetry is enabled.");
- MongoRunner.stopMongod(conn);
- return;
-}
-
-// Pipeline to read telemetry store should fail without feature flag turned on.
-assert.commandFailedWithCode(
- testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}),
- ErrorCodes.QueryFeatureNotAllowed);
-
-// Pipeline, with a filter, to read telemetry store fails without feature flag turned on.
-assert.commandFailedWithCode(testDB.adminCommand({
- aggregate: 1,
- pipeline: [{$telemetry: {}}, {$match: {"key.find.find": {$eq: "###"}}}],
- cursor: {}
-}),
- ErrorCodes.QueryFeatureNotAllowed);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js b/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js
deleted file mode 100644
index 87fc54a3360e7..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Test that the telemetry metrics are aggregated properly by distinct query shape over getMore
- * calls.
- * @tags: [featureFlagTelemetry]
- */
-load("jstests/libs/telemetry_utils.js"); // For verifyMetrics.
-
-(function() {
-"use strict";
-
-// Turn on the collecting of telemetry metrics.
-let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-};
-
-const conn = MongoRunner.runMongod(options);
-const testDB = conn.getDB('test');
-var coll = testDB[jsTestName()];
-coll.drop();
-
-// Bulk insert documents to reduce roundtrips and make a timeout on a slow machine less likely.
-const bulk = coll.initializeUnorderedBulkOp();
-const numDocs = 100;
-for (let i = 0; i < numDocs / 2; ++i) {
- bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
- bulk.insert({foo: 1, bar: Math.floor(Math.random() * -2)});
-}
-assert.commandWorked(bulk.execute());
-
-// Assert that two queries with identical structures are represented by the same key.
-{
- // Note: toArray() is necessary for the batchSize-limited query to run to cursor exhaustion
- // (when it writes to the telemetry store).
- coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray();
- coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray();
-
-    // This command will return all telemetry store entries.
- const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray();
- // Assert there is only one entry.
- assert.eq(telemetryResults.length, 1, telemetryResults);
- const telemetryEntry = telemetryResults[0];
- assert.eq(telemetryEntry.key.namespace, `test.${jsTestName()}`);
- assert.eq(telemetryEntry.key.applicationName, "MongoDB Shell");
-
- // Assert we update execution count for identically shaped queries.
- assert.eq(telemetryEntry.metrics.execCount, 2);
-
- // Assert telemetry values are accurate for the two above queries.
- assert.eq(telemetryEntry.metrics.docsReturned.sum, numDocs);
- assert.eq(telemetryEntry.metrics.docsReturned.min, numDocs / 2);
- assert.eq(telemetryEntry.metrics.docsReturned.max, numDocs / 2);
-
- verifyMetrics(telemetryResults);
-}
-
-const fooEqBatchSize = 5;
-const fooNeBatchSize = 3;
-// Assert on batchSize-limited queries that killCursors will write metrics with partial results to
-// the telemetry store.
-{
- let cursor1 = coll.find({foo: {$eq: 0}}).batchSize(fooEqBatchSize);
- let cursor2 = coll.find({foo: {$ne: 0}}).batchSize(fooNeBatchSize);
- // Issue one getMore for the first query, so 2 * fooEqBatchSize documents are returned total.
- assert.commandWorked(testDB.runCommand(
- {getMore: cursor1.getId(), collection: coll.getName(), batchSize: fooEqBatchSize}));
-
- // Kill both cursors so the telemetry metrics are stored.
- assert.commandWorked(testDB.runCommand(
- {killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]}));
-
-    // This filters telemetry entries to just the ones entered when running the above find queries.
- const telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([
- {$telemetry: {}},
- {$match: {"key.filter.foo": {$exists: true}}},
- {$sort: {key: 1}},
- ])
- .toArray();
- assert.eq(telemetryResults.length, 2, telemetryResults);
- assert.eq(telemetryResults[0].key.cmdNs.db, "test");
- assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName());
- assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell");
- assert.eq(telemetryResults[1].key.cmdNs.db, "test");
- assert.eq(telemetryResults[1].key.cmdNs.coll, jsTestName());
- assert.eq(telemetryResults[1].key.applicationName, "MongoDB Shell");
-
- assert.eq(telemetryResults[0].metrics.execCount, 1);
- assert.eq(telemetryResults[1].metrics.execCount, 1);
- assert.eq(telemetryResults[0].metrics.docsReturned.sum, fooEqBatchSize * 2);
- assert.eq(telemetryResults[1].metrics.docsReturned.sum, fooNeBatchSize);
-
- verifyMetrics(telemetryResults);
-}
-
-// Assert that options such as limit/sort create different keys, and that repeating a query shape
-// ({foo: {$eq}}) aggregates metrics across executions.
-{
- const query2Limit = 50;
- coll.find({foo: {$eq: 0}}).batchSize(2).toArray();
- coll.find({foo: {$eq: 1}}).limit(query2Limit).batchSize(2).toArray();
- coll.find().sort({"foo": 1}).batchSize(2).toArray();
-    // This filters telemetry entries to just the ones entered when running the above find queries.
- let telemetryResults =
- testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.find": {$exists: true}}}])
- .toArray();
- assert.eq(telemetryResults.length, 4, telemetryResults);
-
- verifyMetrics(telemetryResults);
-
- // This filters to just the telemetry for query coll.find().sort({"foo": 1}).batchSize(2).
- telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.sort.foo": 1}}])
- .toArray();
- assert.eq(telemetryResults.length, 1, telemetryResults);
- assert.eq(telemetryResults[0].key.cmdNs.db, "test");
- assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName());
- assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell");
- assert.eq(telemetryResults[0].metrics.execCount, 1);
- assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs);
-
- // This filters to just the telemetry for query coll.find({foo: {$eq:
- // 1}}).limit(query2Limit).batchSize(2).
- telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([{$telemetry: {}}, {$match: {"key.limit": '?'}}])
- .toArray();
- assert.eq(telemetryResults.length, 1, telemetryResults);
- assert.eq(telemetryResults[0].key.cmdNs.db, "test");
- assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName());
- assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell");
- assert.eq(telemetryResults[0].metrics.execCount, 1);
- assert.eq(telemetryResults[0].metrics.docsReturned.sum, query2Limit);
-
- // This filters to just the telemetry for query coll.find({foo: {$eq: 0}}).batchSize(2).
- telemetryResults = testDB.getSiblingDB("admin")
- .aggregate([
- {$telemetry: {}},
- {
- $match: {
- "key.filter.foo": {$eq: {$eq: "?"}},
- "key.limit": {$exists: false},
- "key.sort": {$exists: false}
- }
- }
- ])
- .toArray();
- assert.eq(telemetryResults.length, 1, telemetryResults);
- assert.eq(telemetryResults[0].key.cmdNs.db, "test");
- assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName());
- assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell");
- assert.eq(telemetryResults[0].metrics.execCount, 2);
- assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs / 2 + 2 * fooEqBatchSize);
- assert.eq(telemetryResults[0].metrics.docsReturned.max, numDocs / 2);
- assert.eq(telemetryResults[0].metrics.docsReturned.min, 2 * fooEqBatchSize);
-}
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
deleted file mode 100644
index 6bbf55f08ea7a..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Test that $telemetry properly redacts find commands, on mongod and mongos.
- * @tags: [requires_fcv_70]
- */
-load("jstests/libs/telemetry_utils.js");
-(function() {
-"use strict";
-
-function runTest(conn) {
- const db = conn.getDB("test");
- const admin = conn.getDB("admin");
-
- db.test.drop();
- db.test.insert({v: 1});
-
- db.test.find({v: 1}).toArray();
-
- let telemetry = getTelemetryRedacted(admin);
-
- assert.eq(1, telemetry.length);
- assert.eq("n4bQgYhMfWWa", telemetry[0].key.find);
- assert.eq({"TJRIXgwhrmxB": {$eq: "?"}}, telemetry[0].key.filter);
-
- db.test.insert({v: 2});
-
- const cursor = db.test.find({v: {$gt: 0, $lt: 3}}).batchSize(1);
- telemetry = getTelemetryRedacted(admin);
- // Cursor isn't exhausted, so there shouldn't be another entry yet.
- assert.eq(1, telemetry.length);
-
- assert.commandWorked(
- db.runCommand({getMore: cursor.getId(), collection: db.test.getName(), batchSize: 2}));
-
- telemetry = getTelemetryRedacted(admin);
- assert.eq(2, telemetry.length);
- assert.eq("n4bQgYhMfWWa", telemetry[1].key.find);
- assert.eq({"$and": [{"TJRIXgwhrmxB": {"$gt": "?"}}, {"TJRIXgwhrmxB": {"$lt": "?"}}]},
- telemetry[1].key.filter);
-}
-
-const conn = MongoRunner.runMongod({
- setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
- }
-});
-runTest(conn);
-MongoRunner.stopMongod(conn);
-
-const st = new ShardingTest({
- mongos: 1,
- shards: 1,
- config: 1,
- rs: {nodes: 1},
- mongosOptions: {
- setParameter: {
- internalQueryConfigureTelemetrySamplingRate: -1,
- featureFlagTelemetry: true,
- 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
- }
- },
-});
-runTest(st.s);
-st.stop();
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js b/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js
deleted file mode 100644
index 1bada398a0378..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even
- * if feature flag is on.
- * @tags: [featureFlagTelemetry]
- */
-load('jstests/libs/analyze_plan.js');
-
-(function() {
-"use strict";
-
-let options = {
- setParameter: {internalQueryConfigureTelemetrySamplingRate: 0},
-};
-
-const conn = MongoRunner.runMongod(options);
-const testdb = conn.getDB('test');
-var coll = testdb[jsTestName()];
-coll.drop();
-for (var i = 0; i < 20; i++) {
- coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
-}
-
-coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
-
-// Reading telemetry store with a sampling rate of 0 should return 0 documents.
-let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}});
-assert.eq(telStore.cursor.firstBatch.length, 0);
-
-// Reading telemetry store should work now with a sampling rate of greater than 0.
-assert.commandWorked(testdb.adminCommand(
- {setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}));
-coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}});
-telStore = assert.commandWorked(
- testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
-assert.eq(telStore.cursor.firstBatch.length, 1);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js b/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js
deleted file mode 100644
index 84ac1717d6994..0000000000000
--- a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js
+++ /dev/null
@@ -1,155 +0,0 @@
-/**
- * Test the telemetry related serverStatus metrics.
- * @tags: [featureFlagTelemetry]
- */
-load('jstests/libs/analyze_plan.js');
-
-(function() {
-"use strict";
-
-function runTestWithMongodOptions(mongodOptions, test, testOptions) {
- const conn = MongoRunner.runMongod(mongodOptions);
- const testDB = conn.getDB('test');
- const coll = testDB[jsTestName()];
-
- test(conn, testDB, coll, testOptions);
-
- MongoRunner.stopMongod(conn);
-}
-
-/**
- * Test serverStatus metric which counts the number of evicted entries.
- *
- * testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true }
- */
-function evictionTest(conn, testDB, coll, testOptions) {
- const evictedBefore = testDB.serverStatus().metrics.telemetry.numEvicted;
- assert.eq(evictedBefore, 0);
- for (var i = 0; i < 4000; i++) {
- let query = {};
- query["foo" + i] = "bar";
- coll.aggregate([{$match: query}]).itcount();
- }
- if (!testOptions.resetCacheSize) {
- const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
- assert.gt(evictedAfter, 0);
- return;
- }
- // Make sure number of evicted entries increases when the cache size is reset, which forces out
- // least recently used entries to meet the new, smaller size requirement.
- assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0);
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "1MB"}));
- const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted;
- assert.gt(evictedAfter, 0);
-}
-
-/**
- * Test serverStatus metric which counts the number of requests for which telemetry is not collected
- * due to rate-limiting.
- *
- * testOptions must include `samplingRate` and `numRequests` number fields;
- * e.g., { samplingRate: 2147483647, numRequests: 20 }
- */
-function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) {
- const numRateLimitedRequestsBefore =
- testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
- assert.eq(numRateLimitedRequestsBefore, 0);
-
- coll.insert({a: 0});
-
- // Running numRequests / 2 times since we dispatch two requests per iteration
- for (var i = 0; i < testOptions.numRequests / 2; i++) {
- coll.find({a: 0}).toArray();
- coll.aggregate([{$match: {a: 1}}]);
- }
-
- const numRateLimitedRequestsAfter =
- testDB.serverStatus().metrics.telemetry.numRateLimitedRequests;
-
- if (testOptions.samplingRate === 0) {
- // Telemetry should not be collected for any requests.
- assert.eq(numRateLimitedRequestsAfter, testOptions.numRequests);
- } else if (testOptions.samplingRate >= testOptions.numRequests) {
- // Telemetry should be collected for all requests.
- assert.eq(numRateLimitedRequestsAfter, 0);
- } else {
- // Telemetry should be collected for some but not all requests.
- assert.gt(numRateLimitedRequestsAfter, 0);
- assert.lt(numRateLimitedRequestsAfter, testOptions.numRequests);
- }
-}
-
-function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) {
- assert.eq(testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes, 0);
- let halfWayPointSize;
- // Only using three digit numbers (eg 100, 101) means the string length will be the same for all
- // entries and therefore the key size will be the same for all entries, which makes predicting
- // the total size of the store clean and easy.
- for (var i = 100; i < 200; i++) {
- coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount();
- if (i == 150) {
- halfWayPointSize =
- testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
- }
- }
- // Confirm that telemetry store has grown and size is non-zero.
- assert.gt(halfWayPointSize, 0);
- const fullSize = testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes;
- assert.gt(fullSize, 0);
- // Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%)
- assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05,
- tojson({fullSize, halfWayPointSize}));
-}
-/**
- * In this configuration, we insert enough entries into the telemetry store to trigger LRU
- * eviction.
- */
-runTestWithMongodOptions({
- setParameter: {
- internalQueryConfigureTelemetryCacheSize: "1MB",
- internalQueryConfigureTelemetrySamplingRate: -1
- },
-},
- evictionTest,
- {resetCacheSize: false});
-/**
- * In this configuration, eviction is triggered only when the telemetry store size is reset.
- * */
-runTestWithMongodOptions({
- setParameter: {
- internalQueryConfigureTelemetryCacheSize: "4MB",
- internalQueryConfigureTelemetrySamplingRate: -1
- },
-},
- evictionTest,
- {resetCacheSize: true});
-
-/**
- * In this configuration, every query is sampled, so no requests should be rate-limited.
- */
-runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-},
- countRateLimitedRequestsTest,
- {samplingRate: 2147483647, numRequests: 20});
-
-/**
- * In this configuration, the sampling rate is set so that some but not all requests are
- * rate-limited.
- */
-runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: 10},
-},
- countRateLimitedRequestsTest,
- {samplingRate: 10, numRequests: 20});
-
-/**
- * Sample all queries and assert that the size of telemetry store is equal to num entries * entry
- * size
- */
-runTestWithMongodOptions({
- setParameter: {internalQueryConfigureTelemetrySamplingRate: -1},
-},
- telemetryStoreSizeEstimateTest);
-}());
diff --git a/jstests/noPassthrough/timeseries_bucket_limit_size.js b/jstests/noPassthrough/timeseries_bucket_limit_size.js
index de1719ec8da96..6449e9bf931ee 100644
--- a/jstests/noPassthrough/timeseries_bucket_limit_size.js
+++ b/jstests/noPassthrough/timeseries_bucket_limit_size.js
@@ -7,10 +7,8 @@
* requires_fcv_61,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const conn = MongoRunner.runMongod({setParameter: {timeseriesBucketMinCount: 1}});
@@ -18,6 +16,8 @@ const dbName = jsTestName();
const db = conn.getDB(dbName);
TimeseriesTest.run((insert) => {
+ const alwaysUseCompressedBuckets =
+ FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets");
const areTimeseriesScalabilityImprovementsEnabled =
TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db);
@@ -81,7 +81,7 @@ TimeseriesTest.run((insert) => {
assert.eq(largeValue,
bucketDocs[0].control.max.x,
'invalid control.max for x in first bucket: ' + tojson(bucketDocs[0].control));
- assert.eq(2,
+ assert.eq(alwaysUseCompressedBuckets ? 1 : 2,
bucketDocs[0].control.version,
'unexpected control.version in first bucket: ' + tojson(bucketDocs));
@@ -114,4 +114,3 @@ TimeseriesTest.run((insert) => {
});
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js b/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js
deleted file mode 100644
index 5a50f30c6d51a..0000000000000
--- a/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js
+++ /dev/null
@@ -1,212 +0,0 @@
-/**
- * Tests behavior with the bucketing parameters on time-series collections when downgrading. If we
- * are using custom bucketing parameters we expect to fail the downgrade but if we use default
- * granularity values the downgrade should succeed.
- */
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
-
-const conn = MongoRunner.runMongod();
-const db = conn.getDB("test");
-
-if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(conn)) {
- jsTestLog(
- "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- MongoRunner.stopMongod(conn);
- return;
-}
-
-const collName = "timeseries_bucketing_parameters";
-const coll = db.getCollection(collName);
-const bucketsColl = db.getCollection("system.buckets." + collName);
-
-const timeFieldName = "tm";
-const metaFieldName = "mm";
-
-const resetCollection = function(extraOptions = {}) {
- coll.drop();
-
- const tsOpts = {timeField: timeFieldName, metaField: metaFieldName};
- assert.commandWorked(
- db.createCollection(coll.getName(), {timeseries: Object.merge(tsOpts, extraOptions)}));
- assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 1}));
-};
-
-const secondsMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('seconds');
-const secondsRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('seconds');
-const minutesMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('minutes');
-const minutesRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('minutes');
-const hoursMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('hours');
-const hoursRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('hours');
-
-const getNearestGranularity = function(bucketingParams) {
- assert(bucketingParams.hasOwnProperty('bucketMaxSpanSeconds') &&
- bucketingParams.hasOwnProperty('bucketRoundingSeconds'));
-
- if (bucketingParams.bucketMaxSpanSeconds <= secondsMaxSpan &&
- bucketingParams.bucketRoundingSeconds <= secondsRoundingSeconds) {
- return 'seconds';
- }
-
- if (bucketingParams.bucketMaxSpanSeconds <= minutesMaxSpan &&
- bucketingParams.bucketRoundingSeconds <= minutesRoundingSeconds) {
- return 'minutes';
- }
-
- if (bucketingParams.bucketMaxSpanSeconds <= hoursMaxSpan &&
- bucketingParams.bucketRoundingSeconds <= hoursRoundingSeconds) {
- return 'hours';
- }
-
- return null;
-};
-
-// Checks that the downgrade command succeeds and resets the version to latestFCV.
-function checkDowngradeSucceeds() {
- // Verify that downgrade succeeds.
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
-
- // Check that the 'granularity' and 'bucketMaxSpanSeconds' are correctly set and that
- // 'bucketRoundingSeconds' is not set to any value.
- let collections = assert.commandWorked(db.runCommand({listCollections: 1})).cursor.firstBatch;
- let collectionEntry =
- collections.find(entry => entry.name === 'system.buckets.' + coll.getName());
- assert(collectionEntry);
-
- let granularity = collectionEntry.options.timeseries.granularity;
- assert(granularity);
- assert.isnull(collectionEntry.options.timeseries.bucketRoundingSeconds);
- assert.eq(collectionEntry.options.timeseries.bucketMaxSpanSeconds,
- TimeseriesTest.getBucketMaxSpanSecondsFromGranularity(granularity));
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-}
-
-// Checks that downgrade fails but tries again by using the collMod command to modify the collection
-// into a downgradable state. Will drop the collection if there is no possible granularity to
-// update.
-function checkDowngradeFailsAndTryAgain(bucketingParams) {
- assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
-
- let nextGranularity = getNearestGranularity(bucketingParams);
-
- if (nextGranularity) {
- assert.commandWorked(
- db.runCommand({collMod: collName, timeseries: {granularity: nextGranularity}}));
- } else {
- // If the bucketMaxSpanSeconds and bucketRoundingSeconds are both greater than the values
- // corresponding to the 'hours' granularity, the only way to successfully downgrade is to
- // drop the collection.
- resetCollection();
- }
-
- checkDowngradeSucceeds();
-}
-
-const checkBucketCount = function(count = 1) {
- let stats = assert.commandWorked(coll.stats());
- assert(stats.timeseries);
- assert.eq(stats.timeseries['bucketCount'], count);
-};
-
-// 1. We expect downgrade to work seamlessly when a standard granularity is used.
-{
- resetCollection();
-
- // When we create collections with no granularity specified, we should default to 'seconds'
- // meaning we should be able to downgrade successfully.
- checkDowngradeSucceeds();
-
-    // If we explicitly set the granularity of a collection, we expect to successfully downgrade.
- resetCollection({granularity: 'seconds'});
- checkDowngradeSucceeds();
-
- // We expect to successfully downgrade with different granularity values.
- assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "seconds"}}));
- checkDowngradeSucceeds();
- assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "minutes"}}));
- checkDowngradeSucceeds();
- assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "hours"}}));
- checkDowngradeSucceeds();
-}
-
-// 2. We expect to successfully downgrade if 'bucketMaxSpanSeconds' corresponds to a granularity.
-{
- resetCollection({granularity: 'seconds', bucketMaxSpanSeconds: secondsMaxSpan});
- checkDowngradeSucceeds();
-
- resetCollection({granularity: 'seconds', bucketMaxSpanSeconds: secondsMaxSpan});
- checkDowngradeSucceeds();
-
- resetCollection({granularity: 'minutes', bucketMaxSpanSeconds: minutesMaxSpan});
- checkDowngradeSucceeds();
-
- resetCollection({granularity: 'hours', bucketMaxSpanSeconds: hoursMaxSpan});
- checkDowngradeSucceeds();
-}
-
-// 3. When we set values for 'bucketMaxSpanSeconds' and 'bucketRoundingSeconds' we expect downgrade
-// to fail. Changing the collection's granularity to the next possible granularity should allow
-// downgrade to succeed.
-{
- // Use custom bucketing parameters (less than the 'seconds' granularity).
- let bucketingParams = {
- bucketMaxSpanSeconds: secondsRoundingSeconds,
- bucketRoundingSeconds: secondsRoundingSeconds
- };
- resetCollection(bucketingParams);
-
- // Insert a few measurements to create a total of 3 buckets.
- assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 2}));
- assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 3}));
- checkBucketCount(3);
-
- // Expect downgrade to fail but when the granularity is changed to 'seconds' we should
- // successfully downgrade.
- checkDowngradeFailsAndTryAgain(bucketingParams);
-
- // Use custom bucketing parameters (less than the 'minutes' granularity).
- bucketingParams = {bucketMaxSpanSeconds: secondsMaxSpan, bucketRoundingSeconds: secondsMaxSpan};
- assert.commandWorked(db.runCommand({collMod: collName, timeseries: bucketingParams}));
-
- // Expect downgrade to fail but when the granularity is changed to 'minutes' we should
- // successfully downgrade.
- checkDowngradeFailsAndTryAgain(bucketingParams);
-
- // Use custom bucketing parameters (less than the 'hours' granularity).
- bucketingParams = {bucketMaxSpanSeconds: minutesMaxSpan, bucketRoundingSeconds: minutesMaxSpan};
- assert.commandWorked(db.runCommand({collMod: collName, timeseries: bucketingParams}));
-
- // Expect downgrade to fail but when the granularity is changed to 'hours' we should
- // successfully downgrade.
- checkDowngradeFailsAndTryAgain(bucketingParams);
-
- // Make sure the collection did not get dropped in the process to successfully downgrade by
- // checking the number of buckets in the collection.
- checkBucketCount(3);
-}
-
-// 4. In cases where the bucketing parameters are higher than the possible granularities, the only
-// way to downgrade is to drop the collection.
-{
- let bucketingParams = {bucketMaxSpanSeconds: hoursMaxSpan, bucketRoundingSeconds: hoursMaxSpan};
- resetCollection(bucketingParams);
-
- // Insert a few measurements to create a total of 3 buckets.
- assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 2}));
- assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 3}));
- checkBucketCount(3);
-
-    // Expect the downgrade to fail; the collection is then dropped so that the downgrade succeeds.
- checkDowngradeFailsAndTryAgain(bucketingParams);
-
- // Verify the original collection had to be dropped in order to downgrade.
- checkBucketCount(1);
-}
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/timeseries_collStats.js b/jstests/noPassthrough/timeseries_collStats.js
index 362fc891c4a04..e8430a57a76c8 100644
--- a/jstests/noPassthrough/timeseries_collStats.js
+++ b/jstests/noPassthrough/timeseries_collStats.js
@@ -7,10 +7,9 @@
* requires_getmore,
* ]
*/
-(function() {
-"use strict";
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
-load("jstests/core/timeseries/libs/timeseries.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const kIdleBucketExpiryMemoryUsageThreshold = 1024 * 1024 * 10;
const conn = MongoRunner.runMongod({
@@ -22,6 +21,8 @@ const conn = MongoRunner.runMongod({
const dbName = jsTestName();
const testDB = conn.getDB(dbName);
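+// The always-compressed-buckets feature changes which compression-related stats are expected below.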
+const alwaysUseCompressedBuckets =
+ FeatureFlagUtil.isEnabled(testDB, "TimeseriesAlwaysUseCompressedBuckets");
const isTimeseriesScalabilityImprovementsEnabled =
TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB);
@@ -182,7 +183,7 @@ if (isTimeseriesScalabilityImprovementsEnabled) {
expectedStats.numBucketsClosedDueToTimeBackward++;
}
expectedStats.numMeasurementsCommitted++;
-if (!isTimeseriesScalabilityImprovementsEnabled) {
+if (!isTimeseriesScalabilityImprovementsEnabled && !alwaysUseCompressedBuckets) {
expectedStats.numCompressedBuckets++;
}
if (isTimeseriesScalabilityImprovementsEnabled) {
@@ -203,7 +204,9 @@ expectedStats.numCommits += 2;
expectedStats.numMeasurementsCommitted += numDocs;
expectedStats.avgNumMeasurementsPerCommit =
Math.floor(expectedStats.numMeasurementsCommitted / expectedStats.numCommits);
-expectedStats.numCompressedBuckets++;
+if (!alwaysUseCompressedBuckets) {
+ expectedStats.numCompressedBuckets++;
+}
if (isTimeseriesScalabilityImprovementsEnabled) {
expectedStats.numBucketQueriesFailed++;
}
@@ -226,8 +229,10 @@ expectedStats.numCommits += 2;
expectedStats.numMeasurementsCommitted += 1001;
expectedStats.avgNumMeasurementsPerCommit =
Math.floor(expectedStats.numMeasurementsCommitted / expectedStats.numCommits);
-expectedStats.numCompressedBuckets++;
-expectedStats.numSubObjCompressionRestart += 2;
+if (!alwaysUseCompressedBuckets) {
+ expectedStats.numCompressedBuckets++;
+ expectedStats.numSubObjCompressionRestart += 2;
+}
if (isTimeseriesScalabilityImprovementsEnabled) {
expectedStats.numBucketQueriesFailed++;
}
@@ -327,5 +332,4 @@ testIdleBucketExpiry(i => {
return {[timeFieldName]: ISODate(), [metaFieldName]: i, a: largeValue};
});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthrough/timeseries_compression_fail.js b/jstests/noPassthrough/timeseries_compression_fail.js
index 4b7ffa85a21a4..aaee83849ff60 100644
--- a/jstests/noPassthrough/timeseries_compression_fail.js
+++ b/jstests/noPassthrough/timeseries_compression_fail.js
@@ -2,10 +2,7 @@
* Tests that the server can detect when timeseries bucket compression is not decompressible without
* data loss. Bucket should remain uncompressed and we log that this happened.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
let conn = MongoRunner.runMongod();
@@ -13,6 +10,13 @@ const dbName = jsTestName();
const db = conn.getDB(dbName);
const coll = db.getCollection('t');
+// TODO SERVER-77454: Investigate re-enabling this.
+if (FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ jsTestLog("Skipping test as the always use compressed buckets feature is enabled");
+ MongoRunner.stopMongod(conn);
+ quit();
+}
+
// Assumes each bucket has a limit of 1000 measurements.
const bucketMaxCount = 1000;
const numDocs = bucketMaxCount + 100;
@@ -47,4 +51,3 @@ assert.eq(1, bucketDocs[0].control.version);
assert.eq(1, bucketDocs[1].control.version);
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/timeseries_create.js b/jstests/noPassthrough/timeseries_create.js
index b13d166d35eaa..24c238a0a0f54 100644
--- a/jstests/noPassthrough/timeseries_create.js
+++ b/jstests/noPassthrough/timeseries_create.js
@@ -5,10 +5,7 @@
* @tags: [
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const conn = MongoRunner.runMongod();
@@ -231,5 +228,4 @@ testTimeseriesNamespaceExists((testDB, collName) => {
assert.commandWorked(testDB.runCommand({drop: coll.getName(), writeConcern: {w: "majority"}}));
}
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthrough/timeseries_delete_one_transaction.js b/jstests/noPassthrough/timeseries_delete_one_transaction.js
new file mode 100644
index 0000000000000..3bb68d8a94f3d
--- /dev/null
+++ b/jstests/noPassthrough/timeseries_delete_one_transaction.js
@@ -0,0 +1,263 @@
+/**
+ * Tests the deleteOne command on time-series collections in multi-document transactions.
+ */
+load("jstests/libs/fail_point_util.js");
+load('jstests/libs/parallel_shell_helpers.js');
+
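+// A single-node replica set is used because multi-document transactions require a replica set.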
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const metaFieldName = "mm";
+const timeFieldName = "tt";
+const collectionNamePrefix = "test_coll_";
+let collectionCounter = 0;
+
+const testDB = rst.getPrimary().getDB(jsTestName());
+let testColl = testDB[collectionNamePrefix + collectionCounter];
+assert.commandWorked(testDB.dropDatabase());
+
+const docsPerMetaField = 3;
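+// Creates a fresh time-series collection and inserts 'docsPerMetaField' documents for each of the
+// meta values 0, 1, and 2.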
+const initializeData = function() {
+ testColl = testDB[collectionNamePrefix + ++collectionCounter];
+ assert.commandWorked(testDB.createCollection(
+ testColl.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+
+ let docs = [];
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 0});
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 1});
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 2});
+ }
+
+ // Insert test documents.
+ assert.commandWorked(testColl.insertMany(docs));
+ printjson("Printing docs: " + tojson(testColl.find({}).toArray()));
+};
+
+// 1. Delete one document from the collection in a transaction.
+(function basicDeleteOne() {
+ jsTestLog("Running 'basicDeleteOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ session.startTransaction();
+ assert.commandWorked(sessionColl.deleteOne({_id: 0, [metaFieldName]: 0}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect one deleted document with meta: 0.
+ assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, docsPerMetaField - 1);
+})();
+
+// 2. deleteOne should not have visible changes when the transaction is aborted.
+(function deleteOneTransactionAborts() {
+ jsTestLog("Running 'deleteOneTransactionAborts'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ session.startTransaction();
+ assert.commandWorked(sessionColl.deleteOne({_id: 0, [metaFieldName]: 1}));
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+
+ // The transaction was aborted so no documents should have been deleted.
+ assert.eq(testColl.find({[metaFieldName]: 1}).toArray().length, docsPerMetaField);
+})();
+
+// 3. Run a few deleteOnes in a single transaction.
+(function multipleDeleteOne() {
+ jsTestLog("Running 'multipleDeleteOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0}));
+ }
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect all documents with {meta: 0} to be deleted.
+ assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, 0);
+})();
+
+// 4. Tests performing deleteOnes in and out of a transaction on abort.
+(function mixedDeleteOneAbortTxn() {
+ jsTestLog("Running 'mixedDeleteOneAbortTxn'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ // Delete all documents for meta values 0, 1.
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0}));
+ assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 1}));
+ }
+
+ // Outside of the session and transaction, perform a deleteOne.
+ const docFilterNoTxn = {_id: 0, [metaFieldName]: 2};
+ assert.commandWorked(testColl.deleteOne(docFilterNoTxn));
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+
+ // The aborted transaction should not have deleted any documents.
+ assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, docsPerMetaField);
+ assert.eq(testColl.find({[metaFieldName]: 1}).toArray().length, docsPerMetaField);
+
+ // The delete outside of the transaction should have succeeded.
+ assert.eq(testColl.find(docFilterNoTxn).toArray().length, 0);
+})();
+
+// 5. Tests performing deleteOnes in and out of a transaction on commit.
+(function mixedDeleteOneCommitTxn() {
+ jsTestLog("Running 'mixedDeleteOneCommitTxn'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ // Within the transaction.
+ assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0}));
+ assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 1}));
+
+ // Outside of the session and transaction, perform deleteOne.
+ assert.commandWorked(testColl.deleteOne({_id: i, [metaFieldName]: 2}));
+ }
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect all documents to have been deleted.
+ assert.eq(testColl.find({}).toArray().length, 0);
+})();
+
+// 6. Tests a race to delete the same document in and out of a transaction.
+(function raceToDeleteOne() {
+ jsTestLog("Running 'raceToDeleteOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ // Within the transaction, perform deleteOne.
+ const deleteFilter = {_id: 1, [metaFieldName]: 0};
+ assert.commandWorked(sessionColl.deleteOne(deleteFilter));
+
+    // Note: there is a chance the parallel shell runs after the transaction is committed, and
+    // that is fine as both interleavings should succeed.
+ const awaitTestDelete = startParallelShell(
+ funWithArgs(function(dbName, collName, filter) {
+ const testDB = db.getSiblingDB(dbName);
+ const coll = testDB.getCollection(collName);
+
+ // Outside of the session and transaction, perform deleteOne.
+ assert.commandWorked(coll.deleteOne(filter));
+ }, testDB.getName(), testColl.getName(), deleteFilter), testDB.getMongo().port);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(testColl.find(deleteFilter).toArray().length, 0);
+ session.endSession();
+
+ // Allow non-transactional deleteOne to finish.
+ awaitTestDelete();
+ assert.eq(testColl.find(deleteFilter).toArray().length, 0);
+})();
+
+// 7. Tests a transactional deleteOne on a document which gets inserted after the transaction
+// starts.
+(function deleteOneAndInsertBeforeCommit() {
+ jsTestLog("Running 'deleteOneAndInsertBeforeCommit'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ const newDoc = {_id: 101, [timeFieldName]: new Date(), [metaFieldName]: 101};
+
+ session.startTransaction();
+ // Ensure the document does not exist within the snapshot of the newly started transaction.
+ assert.eq(sessionColl.find(newDoc).toArray().length, 0);
+
+ // Outside of the session and transaction, insert document.
+ assert.commandWorked(testColl.insert(newDoc));
+
+ // Double check the document is still not visible from within the transaction.
+ assert.eq(sessionColl.find(newDoc).toArray().length, 0);
+
+ // Within the transaction, perform deleteOne.
+ assert.commandWorked(sessionColl.deleteOne(newDoc));
+ assert.eq(sessionColl.find(newDoc).toArray().length, 0);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // The newly inserted document should be present even though the transaction commits after the
+ // insert.
+ assert.eq(testColl.find(newDoc).toArray().length, 1);
+})();
+
+// 8. Tests two side-by-side transactional deleteOnes on the same document.
+(function deleteOneInTwoTransactions() {
+ jsTestLog("Running 'deleteOneInTwoTransactions'");
+ initializeData();
+
+ const sessionA = testDB.getMongo().startSession();
+ const sessionB = testDB.getMongo().startSession();
+ const collA = sessionA.getDatabase(jsTestName()).getCollection(testColl.getName());
+ const collB = sessionB.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ const docToDelete = {_id: 1, [metaFieldName]: 1};
+
+ // Start transactions on different sessions.
+ sessionA.startTransaction({readConcern: {level: "snapshot"}});
+ sessionB.startTransaction({readConcern: {level: "snapshot"}});
+
+ // Ensure the document exists in the snapshot of both transactions.
+ assert.eq(collA.find(docToDelete).toArray().length, 1);
+ assert.eq(collB.find(docToDelete).toArray().length, 1);
+
+ // Perform deleteOne on transaction A.
+ assert.commandWorked(collA.deleteOne(docToDelete));
+
+ const deleteCommand = {
+ delete: collB.getName(),
+ deletes: [{
+ q: docToDelete,
+ limit: 1,
+ }]
+ };
+
+ // We expect the deleteOne on transaction B to fail, causing the transaction to abort.
+ // Sidenote: avoiding the deleteOne method from 'crud_api.js' because it throws.
+ assert.commandFailedWithCode(collB.runCommand(deleteCommand), ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(sessionB.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ sessionB.endSession();
+
+ // Ensure the document does not exist in the snapshot of transaction A.
+ assert.eq(collA.find(docToDelete).toArray().length, 0);
+ // Since transaction A has not committed yet, the document should still be present outside of
+ // the transaction.
+ assert.eq(testColl.find(docToDelete).toArray().length, 1);
+
+ // Ensure the document has been successfully deleted after transaction A commits.
+ assert.commandWorked(sessionA.commitTransaction_forTesting());
+ assert.eq(testColl.find(docToDelete).toArray().length, 0);
+
+ sessionA.endSession();
+})();
+
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_direct_remove_reopen.js b/jstests/noPassthrough/timeseries_direct_remove_reopen.js
index dbbd7898181b0..ea4a60612f6d9 100644
--- a/jstests/noPassthrough/timeseries_direct_remove_reopen.js
+++ b/jstests/noPassthrough/timeseries_direct_remove_reopen.js
@@ -1,11 +1,8 @@
/**
* Tests that direct removal in a timeseries bucket collection synchronizes with bucket reopening.
*/
-(function() {
-'use strict';
-
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load("jstests/libs/parallel_shell_helpers.js");
const conn = MongoRunner.runMongod();
@@ -18,7 +15,7 @@ if (!FeatureFlagUtil.isEnabled(testDB, "TimeseriesScalabilityImprovements")) {
jsTestLog(
"Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
const collName = 'test';
@@ -88,5 +85,4 @@ buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
assert.neq(buckets[0]._id, oldId);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_direct_update.js b/jstests/noPassthrough/timeseries_direct_update.js
index 93f6282b8f124..3f110f9fcbe06 100644
--- a/jstests/noPassthrough/timeseries_direct_update.js
+++ b/jstests/noPassthrough/timeseries_direct_update.js
@@ -2,12 +2,9 @@
* Tests that direct updates to a timeseries bucket collection close the bucket, preventing further
* inserts to land in that bucket or deletes and updates to be applied to it.
*/
-(function() {
-'use strict';
-
load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallel_shell_helpers.js");
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const conn = MongoRunner.runMongod();
@@ -96,15 +93,14 @@ assert(!buckets[2].control.hasOwnProperty("closed"));
// Make sure that closed buckets are skipped by updates and deletes.
if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesUpdatesSupport")) {
- // TODO SERVER-73454 Enable this test.
// The first two buckets containing documents 0 and 1 are closed, so we can only update the
// third document in the last bucket.
- // const result = assert.commandWorked(coll.updateMany({}, {$set: {newField: 123}}));
- // assert.eq(result.matchedCount, 1, result);
- // assert.eq(result.modifiedCount, 1, result);
- // assert.docEq(docs.slice(2, 3),
- // coll.find({newField: 123}, {newField: 0}).toArray(),
- // `Expected exactly one document to be updated. ${coll.find().toArray()}`);
+ const result = assert.commandWorked(coll.updateMany({}, {$set: {newField: 123}}));
+ assert.eq(result.matchedCount, 1, result);
+ assert.eq(result.modifiedCount, 1, result);
+ assert.docEq(docs.slice(2, 3),
+ coll.find({newField: 123}, {newField: 0}).toArray(),
+ `Expected exactly one document to be updated. ${coll.find().toArray()}`);
}
if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesDeletesSupport")) {
// The first two buckets containing documents 0 and 1 are closed, so we can only delete the
@@ -124,5 +120,4 @@ if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesDeletesSupport")) {
`Expected exactly one document to be deleted. ${coll.find().toArray()}`);
}
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
index 2baf8294674e2..d87e1edf101ea 100644
--- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
+++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js
@@ -11,10 +11,8 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const minWiredTigerCacheSizeGB = 0.256;
const cacheSize = minWiredTigerCacheSizeGB * 1000 * 1000 * 1000; // 256 MB
@@ -36,6 +34,10 @@ replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}}
replSet.initiate();
const db = replSet.getPrimary().getDB(jsTestName());
+
+const alwaysUseCompressedBuckets =
+ FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets");
+
const coll = db.getCollection('t');
coll.drop();
assert.commandWorked(db.createCollection(
@@ -45,7 +47,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
replSet.stopSet();
jsTestLog(
'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.');
- return;
+ quit();
}
// Helper to log timeseries stats.
@@ -115,7 +117,11 @@ while (bucketsClosedDueToSize == 0) {
// buckets should be closed due to cache pressure.
assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(timeseriesStats));
assert.eq(bucketsClosedDueToCachePressure, 0, formatStatsLog(timeseriesStats));
-assert.eq(compressedBuckets, cardinalityForCachePressure, formatStatsLog(timeseriesStats));
+if (alwaysUseCompressedBuckets) {
+ assert.eq(compressedBuckets, 0, formatStatsLog(timeseriesStats));
+} else {
+ assert.eq(compressedBuckets, cardinalityForCachePressure, formatStatsLog(timeseriesStats));
+}
// If we pass the cardinality point to simulate cache pressure, we will begin to see buckets closed
// due to 'CachePressure' and not 'DueToSize'.
@@ -145,9 +151,12 @@ assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(ti
assert.eq(
bucketsClosedDueToCachePressure, cardinalityForCachePressure, formatStatsLog(timeseriesStats));
-// We expect the number of compressed buckets to double (independent to whether the buckets were
-// closed due to size or cache pressure).
-assert.eq(compressedBuckets, 2 * cardinalityForCachePressure, formatStatsLog(timeseriesStats));
+if (alwaysUseCompressedBuckets) {
+ assert.eq(compressedBuckets, 0, formatStatsLog(timeseriesStats));
+} else {
+ // We expect the number of compressed buckets to double (independent of whether the buckets were
+ // closed due to size or cache pressure).
+ assert.eq(compressedBuckets, 2 * cardinalityForCachePressure, formatStatsLog(timeseriesStats));
+}
replSet.stopSet();
-})();
diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
index f8e4d77fd2909..5c5dee6b6bb90 100644
--- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
+++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js
@@ -11,10 +11,8 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const defaultBucketMaxSize = 128000; // 125 KB
const minWiredTigerCacheSizeGB = 0.256; // 256 MB
@@ -32,6 +30,10 @@ replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}}
replSet.initiate();
const db = replSet.getPrimary().getDB(jsTestName());
+
+const alwaysUseCompressedBuckets =
+ FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets");
+
let coll = db.getCollection('t');
coll.drop();
@@ -39,7 +41,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
replSet.stopSet();
jsTestLog(
'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.');
- return;
+ quit();
}
// Helper to log timeseries stats.
@@ -110,7 +112,9 @@ const initializeBuckets = function(numOfBuckets = 1) {
expectedBucketCount++;
numBucketsClosedDueToSize++;
- numCompressedBuckets++;
+ if (!alwaysUseCompressedBuckets) {
+ numCompressedBuckets++;
+ }
timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats));
@@ -140,7 +144,9 @@ const initializeBuckets = function(numOfBuckets = 1) {
// We create one bucket for 'meta2', fill it up and create another one for future insertions.
expectedBucketCount += 2;
numBucketsClosedDueToSize++;
- numCompressedBuckets++;
+ if (!alwaysUseCompressedBuckets) {
+ numCompressedBuckets++;
+ }
timeseriesStats = assert.commandWorked(coll.stats()).timeseries;
assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats));
@@ -201,8 +207,9 @@ const initializeBuckets = function(numOfBuckets = 1) {
assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0, formatStatsLog(timeseriesStats));
assert.eq(
timeseriesStats.numBucketsClosedDueToCachePressure, 1, formatStatsLog(timeseriesStats));
- assert.eq(timeseriesStats.numCompressedBuckets, 1, formatStatsLog(timeseriesStats));
+ assert.eq(timeseriesStats.numCompressedBuckets,
+ alwaysUseCompressedBuckets ? 0 : 1,
+ formatStatsLog(timeseriesStats));
})();
replSet.stopSet();
-})();
diff --git a/jstests/noPassthrough/timeseries_expire.js b/jstests/noPassthrough/timeseries_expire.js
index 8f4ce8564e6e3..d9654460db4fe 100644
--- a/jstests/noPassthrough/timeseries_expire.js
+++ b/jstests/noPassthrough/timeseries_expire.js
@@ -7,11 +7,8 @@
* requires_getmore,
* ]
*/
-(function() {
-"use strict";
-
load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
const testDB = conn.getDB(jsTestName());
@@ -52,5 +49,4 @@ TimeseriesTest.run((insert) => {
assert.eq(0, bucketDocs.length, bucketDocs);
});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_expires_with_partial_index.js b/jstests/noPassthrough/timeseries_expires_with_partial_index.js
index 9a0c6534ed365..54fb1a1b7458c 100644
--- a/jstests/noPassthrough/timeseries_expires_with_partial_index.js
+++ b/jstests/noPassthrough/timeseries_expires_with_partial_index.js
@@ -10,13 +10,10 @@
* requires_fcv_63,
* ]
*/
-(function() {
-"use strict";
-
load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers'
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/ttl_util.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {TTLUtil} from "jstests/libs/ttl_util.js";
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
const testDB = conn.getDB(jsTestName());
@@ -117,5 +114,4 @@ TimeseriesTest.run((insert) => {
checkInsertion(coll, collectionTTLExpiredDocLowMeta, true);
});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_extended_range_startup.js b/jstests/noPassthrough/timeseries_extended_range_startup.js
index a77a02be71c16..9d2885791ccc9 100644
--- a/jstests/noPassthrough/timeseries_extended_range_startup.js
+++ b/jstests/noPassthrough/timeseries_extended_range_startup.js
@@ -57,4 +57,4 @@ assert.eq(1, primaryDB.extended.count());
assert.eq(1, getExtendedRangeCount(primary));
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/timeseries_idle_buckets.js b/jstests/noPassthrough/timeseries_idle_buckets.js
index 099d22d591ee4..426efb9e91eb5 100644
--- a/jstests/noPassthrough/timeseries_idle_buckets.js
+++ b/jstests/noPassthrough/timeseries_idle_buckets.js
@@ -5,10 +5,8 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const rst = new ReplSetTest({nodes: 1});
rst.startSet({setParameter: {timeseriesIdleBucketExpiryMemoryUsageThreshold: 10485760}});
@@ -16,6 +14,8 @@ rst.initiate();
const db = rst.getPrimary().getDB(jsTestName());
+const alwaysUseCompressedBuckets =
+ FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets");
const isBucketReopeningEnabled = TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db);
assert.commandWorked(db.dropDatabase());
@@ -70,7 +70,9 @@ for (let i = 0; i < numDocs; i++) {
// Check buckets.
if (isBucketReopeningEnabled) {
- let bucketDocs = bucketsColl.find({"control.version": 2}).limit(1).toArray();
+ let bucketDocs = bucketsColl.find({"control.version": alwaysUseCompressedBuckets ? 1 : 2})
+ .limit(1)
+ .toArray();
if (bucketDocs.length > 0) {
foundExpiredBucket = true;
}
@@ -80,7 +82,7 @@ for (let i = 0; i < numDocs; i++) {
.toArray();
if (bucketDocs.length > 1) {
// If bucket compression is enabled the expired bucket should have been compressed
- assert.eq(2,
+ assert.eq(alwaysUseCompressedBuckets ? 1 : 2,
bucketDocs[0].control.version,
'unexpected control.version in first bucket: ' + tojson(bucketDocs));
assert.eq(1,
@@ -105,4 +107,3 @@ for (let i = 0; i < numDocs; i++) {
assert(foundExpiredBucket, "Did not find an expired bucket");
rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
index 44c922a6b794f..9a1a67d8da104 100644
--- a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
+++ b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
@@ -6,10 +6,7 @@
* requires_replication,
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const replTest = new ReplSetTest({nodes: 2});
replTest.startSet();
@@ -90,5 +87,4 @@ if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB())) {
checkColl(2, 2);
}
-replTest.stopSet();
-})();
+replTest.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_insert_ordered_false.js b/jstests/noPassthrough/timeseries_insert_ordered_false.js
index 5434415c2a0e9..4fc57ece56d0b 100644
--- a/jstests/noPassthrough/timeseries_insert_ordered_false.js
+++ b/jstests/noPassthrough/timeseries_insert_ordered_false.js
@@ -5,10 +5,7 @@
* requires_sharding,
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/core/timeseries/libs/timeseries.js');
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load('jstests/libs/fail_point_util.js');
const conn = MongoRunner.runMongod();
@@ -113,13 +110,6 @@ assert.commandWorked(mongos.adminCommand({enableSharding: jsTestName()}));
// Run test on sharded cluster before sharding the collection.
runTest(mongos, st.getPrimaryShard(jsTestName()), false);
-if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
- jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
- st.stop();
- return;
-}
-
// Run test on sharded cluster after sharding the collection.
runTest(mongos, st.getPrimaryShard(jsTestName()), true);
st.stop();
-})();
diff --git a/jstests/noPassthrough/timeseries_insert_ordered_true.js b/jstests/noPassthrough/timeseries_insert_ordered_true.js
index cbb2f04a78c62..3e57dd42feecd 100644
--- a/jstests/noPassthrough/timeseries_insert_ordered_true.js
+++ b/jstests/noPassthrough/timeseries_insert_ordered_true.js
@@ -1,10 +1,7 @@
/**
* Tests that time-series inserts respect {ordered: true}.
*/
-(function() {
-'use strict';
-
-load('jstests/core/timeseries/libs/timeseries.js');
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load('jstests/libs/fail_point_util.js');
const conn = MongoRunner.runMongod();
@@ -67,4 +64,3 @@ assert.eq(bucketsColl.count(),
' buckets but found: ' + tojson(bucketsColl.find().toArray()));
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/timeseries_insert_rollback.js b/jstests/noPassthrough/timeseries_insert_rollback.js
index 460bc95cfe857..8b90c8ecdf12c 100644
--- a/jstests/noPassthrough/timeseries_insert_rollback.js
+++ b/jstests/noPassthrough/timeseries_insert_rollback.js
@@ -53,7 +53,7 @@ rollbackTest.transitionToSteadyStateOperations();
assert.commandWorked(coll.insert(docs[2], {ordered: true}));
assert.commandWorked(coll.insert(docs[3], {ordered: false}));
-assert.docEq(docs.slice(2), coll.find().toArray());
+assert.sameMembers(docs.slice(2), coll.find().toArray());
const buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 2, 'Expected two bucket but found: ' + tojson(buckets));
diff --git a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
index b7f002039fca4..75d3760e55331 100644
--- a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
+++ b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js
@@ -7,19 +7,11 @@
* does_not_support_stepdowns,
* ]
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js');
-load("jstests/core/timeseries/libs/timeseries.js");
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
const kSmallMemoryLimit = 1024;
-const conn = MongoRunner.runMongod({
- setParameter: {
- internalQueryMaxBlockingSortMemoryUsageBytes: kSmallMemoryLimit,
- featureFlagBucketUnpackWithSort: true
- }
-});
+const conn = MongoRunner.runMongod(
+ {setParameter: {internalQueryMaxBlockingSortMemoryUsageBytes: kSmallMemoryLimit}});
const dbName = jsTestName();
const testDB = conn.getDB(dbName);
@@ -172,5 +164,4 @@ function assertSorted(result) {
assert.eq(naive, opt);
}
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_large_measurements_max_size.js b/jstests/noPassthrough/timeseries_large_measurements_max_size.js
index 7c2b68f9cce2e..a23cab48234fa 100644
--- a/jstests/noPassthrough/timeseries_large_measurements_max_size.js
+++ b/jstests/noPassthrough/timeseries_large_measurements_max_size.js
@@ -7,10 +7,7 @@
* requires_fcv_61,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const conn = MongoRunner.runMongod();
@@ -86,5 +83,4 @@ for (let i = 0; i < numMeasurements; i++) {
assert.commandWorked(coll.insertMany(batch, {ordered: false}));
checkBucketSize();
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_latency_stats.js b/jstests/noPassthrough/timeseries_latency_stats.js
index 923dd642b7207..bd294da9d39f9 100644
--- a/jstests/noPassthrough/timeseries_latency_stats.js
+++ b/jstests/noPassthrough/timeseries_latency_stats.js
@@ -25,20 +25,20 @@ assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
const getLatencyStats = () => {
const stats = coll.aggregate([{$collStats: {latencyStats: {}}}]).next();
- assert(stats.hasOwnProperty("latencyStats"));
- assert(stats.latencyStats.hasOwnProperty("writes"));
+ assert(stats.hasOwnProperty("latencyStats"), tojson(stats));
+ assert(stats.latencyStats.hasOwnProperty("writes"), tojson(stats));
return stats.latencyStats.writes;
};
const stats1 = getLatencyStats();
-assert.eq(stats1.ops, 0);
-assert.eq(stats1.latency, 0);
+assert.eq(stats1.ops, 0, tojson(stats1));
+assert.eq(stats1.latency, 0, tojson(stats1));
assert.commandWorked(coll.insert({[timeFieldName]: new Date(), x: 1}));
const stats2 = getLatencyStats();
-assert.eq(stats2.ops, 1);
-assert.gt(stats2.latency, stats1.latency);
+assert.eq(stats2.ops, 1, tojson(stats2));
+assert.gt(stats2.latency, stats1.latency, tojson(stats2));
const reps = 10;
for (let i = 0; i < reps; ++i) {
@@ -46,8 +46,8 @@ for (let i = 0; i < reps; ++i) {
}
const stats3 = getLatencyStats();
-assert.eq(stats3.ops, 1 + reps);
-assert.gt(stats3.latency, stats2.latency);
+assert.eq(stats3.ops, 1 + reps, tojson(stats3));
+assert.gt(stats3.latency, stats2.latency, tojson(stats3));
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js b/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js
new file mode 100644
index 0000000000000..08424e70c346b
--- /dev/null
+++ b/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js
@@ -0,0 +1,129 @@
+/**
+ * Tests running time-series multi-update commands that spill to disk.
+ *
+ * @tags: [
+ * featureFlagTimeseriesUpdatesSupport
+ * ]
+ */
+
+import {getExecutionStages} from "jstests/libs/analyze_plan.js";
+
+const dateTime = ISODate("2021-07-12T16:00:00Z");
+const buckets = ["A", "B", "C", "D", "E", "F", "G"];
+const numDocsPerBucket = 4;
+
+const conn = MongoRunner.runMongod({setParameter: 'allowDiskUseByDefault=true'});
+const db = conn.getDB(jsTestName());
+const coll = db.getCollection(jsTestName());
+
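+// Recreates the test time-series collection and inserts 'numDocsPerBucket' measurements for each
+// meta value in 'buckets', alternating the 'str' field between "even" and "odd".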
+function setUpCollectionForTest() {
+ coll.drop();
+ assert.commandWorked(
+ db.createCollection(coll.getName(), {timeseries: {timeField: "time", metaField: "meta"}}));
+
+ let docs = [];
+ for (const bucket of buckets) {
+ for (let i = 0; i < numDocsPerBucket; ++i) {
+ docs.push({"time": dateTime, "meta": bucket, str: i % 2 == 0 ? "even" : "odd"});
+ }
+ }
+ assert.commandWorked(coll.insert(docs));
+}
+
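+// Verifies the SPOOL stage's memory/disk limits and spilling counters in the executionStats
+// explain output.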
+function verifySpillingStats(
+ explain, expectedSpills, expectedMemoryLimitBytes, expectedDiskLimitBytes) {
+ const execStages = getExecutionStages(explain);
+ assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`);
+ assert.eq("TS_MODIFY",
+ execStages[0].stage,
+ `TS_MODIFY stage not found in executionStages: ${tojson(explain)}`);
+ assert.eq("SPOOL",
+ execStages[0].inputStage.stage,
+ `SPOOL stage not found in executionStages: ${tojson(explain)}`);
+ const spoolStage = execStages[0].inputStage;
+ assert.eq(spoolStage.memLimit, expectedMemoryLimitBytes, tojson(explain));
+ assert.eq(spoolStage.diskLimit, expectedDiskLimitBytes, tojson(explain));
+ assert.eq(spoolStage.spills, expectedSpills, tojson(explain));
+ if (expectedSpills > 0) {
+ assert(spoolStage.usedDisk, tojson(explain));
+ assert.gt(spoolStage.spilledDataStorageSize, 0, tojson(explain));
+ assert.gte(
+ spoolStage.totalDataSizeSpooled, spoolStage.spilledDataStorageSize, tojson(explain));
+ } else {
+ assert(!spoolStage.usedDisk, tojson(explain));
+ assert.eq(spoolStage.spilledDataStorageSize, 0, tojson(explain));
+ assert.gt(spoolStage.totalDataSizeSpooled, 0, tojson(explain));
+ }
+}
+
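+// Lowers the spool memory limit, runs a multi-update matching half of the measurements, and
+// verifies both the expected spilling stats (via explain) and the update results.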
+function runTest({memoryLimitBytes, expectedSpills}) {
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalQueryMaxSpoolMemoryUsageBytes: memoryLimitBytes}));
+
+ const diskLimitBytes = 10 * memoryLimitBytes;
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryMaxSpoolDiskUsageBytes: diskLimitBytes}));
+ assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: true}));
+
+ setUpCollectionForTest();
+
+ const updateCommand = {
+ update: coll.getName(),
+ updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}]
+ };
+
+ // First run an explain and verify the spilling stats.
+ const explain =
+ assert.commandWorked(db.runCommand({explain: updateCommand, verbosity: "executionStats"}));
+ verifySpillingStats(explain, expectedSpills, memoryLimitBytes, diskLimitBytes);
+
+ // Now run the actual command and verify the results.
+ const res = assert.commandWorked(db.runCommand(updateCommand));
+ // We'll update exactly half the records.
+ const expectedNUpdated = buckets.length * numDocsPerBucket / 2;
+ assert.eq(
+ expectedNUpdated, res.n, "Update did not report the correct number of records updated");
+ assert.eq(coll.find({str: "even"}).toArray().length,
+ 0,
+ "Collection has an unexpected number of records matching filter post-update");
+}
+
+(function noSpilling() {
+ runTest({memoryLimitBytes: 100 * 1024 * 1024, expectedSpills: 0});
+})();
+
+(function spillEveryRecord() {
+ // The spool stage spills 32-byte record ids in this test. Set a limit just under that size so
+ // that we will need to spill on every record.
+ runTest({memoryLimitBytes: 30, expectedSpills: buckets.length});
+})();
+
+(function spillEveryOtherRecord() {
+ // The spool stage spills 32-byte record ids in this test. Set a limit just over that size so
+ // that we will need to spill on every other record.
+ runTest({memoryLimitBytes: 50, expectedSpills: Math.floor(buckets.length / 2)});
+})();
+
+(function maxDiskUseExceeded() {
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryMaxSpoolDiskUsageBytes: 1}));
+ setUpCollectionForTest();
+ assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}]
+ }),
+ 7443700);
+})();
+
+(function maxMemoryUseExceeded_spillingDisabled() {
+ assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false}));
+
+ setUpCollectionForTest();
+ assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}]
+ }),
+ ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
+})();
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_out_concurrent_sharding.js b/jstests/noPassthrough/timeseries_out_concurrent_sharding.js
new file mode 100644
index 0000000000000..cdab4605b6789
--- /dev/null
+++ b/jstests/noPassthrough/timeseries_out_concurrent_sharding.js
@@ -0,0 +1,178 @@
+/*
+ * Ensures that when $out is doing a rename collection operation and a concurrent 'shardCollection'
+ * command is invoked, the operations are serialized. This is a targeted test to reproduce the
+ * scenario described in SERVER-76626. We block the rename operation behind a DDL lock and validate
+ * that a concurrent 'shardCollection' command cannot make progress.
+ *
+ * @tags: [
+ * # We need a timeseries collection.
+ * requires_timeseries,
+ * requires_fcv_71,
+ * featureFlagAggOutTimeseries
+ * ]
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/parallel_shell_helpers.js");
+load("jstests/libs/fail_point_util.js"); // for configureFailPoint.
+
+const dbName = "test";
+const timeFieldName = 'time';
+const metaFieldName = 'tag';
+const numDocs = 40;
+
+/* Creates a new time-series collection on testDB and populates it with 'numDocs' documents. */
+let _collCounter = 0;
+function setUpCollection(testDB) {
+ const collName = 'coll_' + _collCounter++;
+
+ // Create a time-series collection to be the source for $out.
+ testDB.createCollection(collName,
+ {timeseries: {timeField: timeFieldName, metaField: metaFieldName}});
+ const docs = [];
+ for (let i = 0; i < numDocs; ++i) {
+ docs.push({
+ [timeFieldName]: ISODate(),
+ [metaFieldName]: (1 * numDocs) + i,
+ });
+ }
+ assert.commandWorked(testDB[collName].insertMany(docs));
+ return testDB[collName];
+}
+
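+// Runs an $out aggregation from 'sourceCollName' into the time-series collection 'targetCollName',
+// asserting success or failure according to 'expectCommandWorked'.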
+function runOut(dbName, sourceCollName, targetCollName, expectCommandWorked) {
+ const testDB = db.getSiblingDB(dbName);
+ const cmdRes = testDB.runCommand({
+ aggregate: sourceCollName,
+ pipeline: [{
+ $out: {
+ db: testDB.getName(),
+ coll: targetCollName,
+ timeseries: {timeField: "time", metaField: "tag"}
+ }
+ }],
+ cursor: {}
+ });
+ if (expectCommandWorked) {
+ assert.commandWorked(cmdRes);
+ } else {
+ assert.commandFailed(cmdRes);
+ }
+}
+
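+// Asserts that checkMetadataConsistency reports no inconsistencies for the database.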
+function checkMetadata(testDB) {
+ const checkOptions = {'checkIndexes': 1};
+ let inconsistencies = testDB.checkMetadataConsistency(checkOptions).toArray();
+ assert.eq(0, inconsistencies, inconsistencies);
+}
+
+function runOutAndShardCollectionConcurrently_shardCollectionMustFail(st, testDB, primaryShard) {
+ // The target collection should exist to produce the metadata inconsistency scenario.
+ const sourceColl = setUpCollection(testDB);
+ const targetColl = setUpCollection(testDB);
+
+ // Set a failpoint in the internalRenameCollection command after the sharding check.
+ const fp = configureFailPoint(primaryShard, 'blockBeforeInternalRenameAndAfterTakingDDLLocks');
+
+ // Run an $out aggregation pipeline in a parallel shell.
+ let outShell = startParallelShell(funWithArgs(runOut,
+ testDB.getName(),
+ sourceColl.getName(),
+ targetColl.getName(),
+ true /*expectCommandWorked*/),
+ st.s.port);
+
+ // Wait for the aggregation pipeline to hit the failpoint.
+ fp.wait();
+
+ // Validate the temporary collection exists, meaning we are in the middle of the $out stage.
+ const collNames = testDB.getCollectionNames();
+ assert.eq(collNames.filter(col => col.includes('tmp.agg_out')).length, 1, collNames);
+
+ // Assert sharding the target collection fails, since the rename command has a lock on the
+ // view namespace.
+ jsTestLog("attempting to shard the target collection.");
+ assert.commandFailedWithCode(
+ testDB.adminCommand({shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}),
+ ErrorCodes.LockBusy);
+
+ // Turn off the failpoint and resume the $out aggregation pipeline.
+ jsTestLog("turning the failpoint off.");
+ fp.off();
+ outShell();
+ // Assert the metadata is consistent.
+ checkMetadata(testDB);
+
+ // Assert sharding the target collection succeeds, since there is no lock on the view
+ // namespace.
+ assert.commandWorked(testDB.adminCommand(
+ {shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}));
+
+ // Assert the metadata is consistent.
+ checkMetadata(testDB);
+
+ sourceColl.drop();
+ targetColl.drop();
+}
+
+function runOutAndShardCollectionConcurrently_OutMustFail(st, testDB, primaryShard) {
+ // The target collection should exist to produce the metadata inconsistency scenario.
+ const sourceColl = setUpCollection(testDB);
+ const targetColl = setUpCollection(testDB);
+
+ // Set a failpoint in the internalRenameCollection command before it takes the DDL locks.
+ const fp = configureFailPoint(primaryShard, 'blockBeforeInternalRenameAndBeforeTakingDDLLocks');
+
+ // Run an $out aggregation pipeline in a parallel shell.
+ let outShell = startParallelShell(funWithArgs(runOut,
+ testDB.getName(),
+ sourceColl.getName(),
+ targetColl.getName(),
+ false /*expectCommandWorked*/),
+ st.s.port);
+
+ // Wait for the aggregation pipeline to hit the failpoint.
+ fp.wait();
+
+ // Validate the temporary collection exists, meaning we are in the middle of the $out stage.
+ const collNames = testDB.getCollectionNames();
+ assert.eq(collNames.filter(col => col.includes('tmp.agg_out')).length, 1, collNames);
+
+ // Assert sharding the target collection succeeds, since the rename command has not yet taken
+ // the DDL lock on the view namespace.
+ jsTestLog("attempting to shard the target collection.");
+ assert.commandWorked(testDB.adminCommand(
+ {shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}));
+
+ // Turn off the failpoint and resume the $out aggregation pipeline.
+ jsTestLog("turning the failpoint off.");
+ fp.off();
+ outShell();
+
+ // Assert the metadata is consistent.
+ checkMetadata(testDB);
+
+ sourceColl.drop();
+ targetColl.drop();
+}
+
+const st = new ShardingTest({shards: 2});
+const testDB = st.s.getDB(dbName);
+const primaryShard = st.shard0;
+
+// Reduce the DDL lock timeout to half a second to speed up testing commands that are expected to
+// fail with a LockBusy error.
+const fp = configureFailPoint(primaryShard, "overrideDDLLockTimeout", {'timeoutMillisecs': 500});
+
+assert.commandWorked(
+ st.s.adminCommand({enableSharding: dbName, primaryShard: primaryShard.shardName}));
+
+// Running tests
+runOutAndShardCollectionConcurrently_shardCollectionMustFail(st, testDB, primaryShard);
+runOutAndShardCollectionConcurrently_OutMustFail(st, testDB, primaryShard);
+
+fp.off();
+
+st.stop();
+}());
diff --git a/jstests/noPassthrough/timeseries_retry_delete_and_update.js b/jstests/noPassthrough/timeseries_retry_delete_and_update.js
index 276dcb0449f9a..1127dedbb203c 100644
--- a/jstests/noPassthrough/timeseries_retry_delete_and_update.js
+++ b/jstests/noPassthrough/timeseries_retry_delete_and_update.js
@@ -5,7 +5,7 @@
* @tags: [
* requires_replication,
* requires_timeseries,
- * requires_fcv_70,
+ * featureFlagTimeseriesUpdatesSupport,
* ]
*/
(function() {
@@ -42,14 +42,15 @@ let retriedStatementsCount = 0;
* returns the command object given the collection to run it on, and a validate function that
* validates the result after the command has been applied to the collection.
*/
-const runTest = function(initialDocs, cmdBuilderFn, validateFn) {
+const runTest = function(
+ initialDocs, cmdBuilderFn, validateFn, expectError = false, statementRetried = 1) {
const session = primary.startSession({retryWrites: true});
const testDB = session.getDatabase(jsTestName());
- const coll = testDB.getCollection('timeseres_retry_delete_and_update_' + collCount++);
+ const coll = testDB.getCollection('timeseries_retry_delete_and_update_' + collCount++);
coll.drop();
- assert.commandWorked(
- testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
+ assert.commandWorked(testDB.createCollection(
+ coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
assert.commandWorked(testDB.runCommand({
insert: coll.getName(),
@@ -59,13 +60,18 @@ const runTest = function(initialDocs, cmdBuilderFn, validateFn) {
}));
// For retryable writes, the server uses 'txnNumber' as the key to look up previously executed
- // operations in the sesssion.
+ // operations in the session.
let cmdObj = cmdBuilderFn(coll);
cmdObj["lsid"] = session.getSessionId();
cmdObj["txnNumber"] = NumberLong(1);
- assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on first write');
- assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on retry write');
+ if (expectError) {
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.InvalidOptions);
+ } else {
+ assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on first write');
+ assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on retry write');
+ }
validateFn(coll);
@@ -73,7 +79,8 @@ const runTest = function(initialDocs, cmdBuilderFn, validateFn) {
assert.eq(++retriedCommandsCount,
transactionsServerStatus.retriedCommandsCount,
'Incorrect statistic in db.serverStatus(): ' + tojson(transactionsServerStatus));
- assert.eq(++retriedStatementsCount,
+ retriedStatementsCount += statementRetried;
+ assert.eq(retriedStatementsCount,
transactionsServerStatus.retriedStatementsCount,
'Incorrect statistic in db.serverStatus(): ' + tojson(transactionsServerStatus));
@@ -106,7 +113,19 @@ function deleteValidateFn(coll) {
})();
function updateCmdBuilderFn(coll) {
- return {update: coll.getName(), updates: [{q: {}, u: {$inc: {updated: 1}}, multi: false}]};
+ return {
+ update: coll.getName(),
+ updates: [
+ {q: {}, u: {$inc: {updated: 1}}, multi: false},
+ {q: {}, u: {$inc: {updated: 1}}, multi: true},
+ {q: {}, u: {$inc: {anotherUpdated: 1}}, multi: false},
+ ],
+ };
+}
+function updateCmdUnorderedBuilderFn(coll) {
+ let updateCmd = updateCmdBuilderFn(coll);
+ updateCmd["ordered"] = false;
+ return updateCmd;
}
function updateValidateFn(coll) {
assert.eq(coll.countDocuments({updated: {$exists: true}}),
@@ -114,14 +133,61 @@ function updateValidateFn(coll) {
"Expected exactly one document to be updated.");
assert.eq(coll.countDocuments({updated: 1}), 1, "Expected document to be updated only once.");
}
+function updateUnorderedValidateFn(coll) {
+ updateValidateFn(coll);
+ assert.eq(coll.countDocuments({anotherUpdated: {$exists: true}}),
+ 1,
+ "Expected exactly one document to be updated.");
+ assert.eq(
+ coll.countDocuments({anotherUpdated: 1}), 1, "Expected document to be updated only once.");
+}
-// TODO SERVER-73726 Enable update tests.
-// (function testPartialBucketUpdate() {
-// runTest(allDocumentsSameBucket, updateCmdBuilderFn, updateValidateFn);
-// })();
-// (function testFullBucketUpdate() {
-// runTest(allDocumentsDifferentBuckets, updateCmdBuilderFn, updateValidateFn);
-// })();
+(function testPartialBucketUpdate() {
+ runTest(allDocumentsSameBucket,
+ updateCmdBuilderFn,
+ updateValidateFn,
+ /*expectError=*/ true);
+})();
+(function testFullBucketUpdate() {
+ runTest(allDocumentsDifferentBuckets,
+ updateCmdBuilderFn,
+ updateValidateFn,
+ /*expectError=*/ true);
+})();
+(function testPartialBucketUpdateUnordered() {
+ runTest(allDocumentsSameBucket,
+ updateCmdUnorderedBuilderFn,
+ updateUnorderedValidateFn,
+ /*expectError=*/ true,
+ /*statementRetried=*/ 2);
+})();
+(function testFullBucketUpdateUnordered() {
+ runTest(allDocumentsDifferentBuckets,
+ updateCmdUnorderedBuilderFn,
+ updateUnorderedValidateFn,
+ /*expectError=*/ true,
+ /*statementRetried=*/ 2);
+})();
+
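+// Builds a single retryable upsert; a retried execution must not apply the upsert a second time.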
+function upsertCmdBuilderFn(coll) {
+ return {
+ update: coll.getName(),
+ updates: [{
+ q: {[timeFieldName]: dateTime, [metaFieldName]: "B"},
+ u: {$inc: {updated: 1}},
+ multi: false,
+ upsert: true,
+ }],
+ };
+}
+function upsertValidateFn(coll) {
+ assert.eq(coll.countDocuments({[metaFieldName]: "B", updated: 1}),
+ 1,
+ "Expected exactly one document to be upserted once.");
+}
+(function testUpsert() {
+ runTest(allDocumentsSameBucket, upsertCmdBuilderFn, upsertValidateFn);
+})();
rst.stopSet();
})();
diff --git a/jstests/noPassthrough/timeseries_sample.js b/jstests/noPassthrough/timeseries_sample.js
index 70bf25398ba3b..3a5f69824f2ee 100644
--- a/jstests/noPassthrough/timeseries_sample.js
+++ b/jstests/noPassthrough/timeseries_sample.js
@@ -2,10 +2,7 @@
* Tests inserting sample data into the time-series buckets collection. This test is for the
* exercising the optimized $sample implementation for $_internalUnpackBucket.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
+import {aggPlanHasStage, getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js";
let conn = MongoRunner.runMongod({setParameter: {timeseriesBucketMaxCount: 100}});
@@ -197,5 +194,4 @@ assert.gte(sampleFromBucketStage.dupsTested, 150, sampleFromBucketStage);
const multiIteratorStage = getPlanStage(sampleFromBucketStage, "MULTI_ITERATOR");
assert.neq(multiIteratorStage, null, explainRes);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_serverStatus.js b/jstests/noPassthrough/timeseries_serverStatus.js
index 58b3fe74f7da7..f3ac9b61a73c1 100644
--- a/jstests/noPassthrough/timeseries_serverStatus.js
+++ b/jstests/noPassthrough/timeseries_serverStatus.js
@@ -1,12 +1,9 @@
/**
* Tests that serverStatus contains a bucketCatalog section.
*/
-(function() {
-"use strict";
-
load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallel_shell_helpers.js");
-load("jstests/libs/feature_flag_util.js");
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const conn = MongoRunner.runMongod();
@@ -112,5 +109,4 @@ if (!FeatureFlagUtil.isEnabled(conn, "TimeseriesScalabilityImprovements")) {
checkNoServerStatus();
}
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_server_parameters.js b/jstests/noPassthrough/timeseries_server_parameters.js
index f74e1923ef381..d1be2c8d99c41 100644
--- a/jstests/noPassthrough/timeseries_server_parameters.js
+++ b/jstests/noPassthrough/timeseries_server_parameters.js
@@ -6,10 +6,6 @@
* ]
*/
-(function() {
-'use strict';
-
-load("jstests/core/timeseries/libs/timeseries.js");
load("jstests/noPassthrough/libs/server_parameter_helpers.js");
// Valid parameter values are in the range [0, infinity).
@@ -32,5 +28,4 @@ testNumericServerParameter('timeseriesBucketMaxSize',
true /*hasLowerBound*/,
0 /*lowerOutOfBounds*/,
false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
-})();
+ "unused" /*upperOutOfBounds*/);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_server_status_measurements.js b/jstests/noPassthrough/timeseries_server_status_measurements.js
index e5177cdadb4c8..67a765b6f2781 100644
--- a/jstests/noPassthrough/timeseries_server_status_measurements.js
+++ b/jstests/noPassthrough/timeseries_server_status_measurements.js
@@ -7,10 +7,7 @@
* requires_fcv_61,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const conn = MongoRunner.runMongod();
@@ -75,5 +72,4 @@ for (let i = 0; i < numMeasurements; i++) {
assert.commandWorked(coll.insertMany(batch, {ordered: false}));
checkBucketSize();
-MongoRunner.stopMongod(conn);
-}());
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_sort.js b/jstests/noPassthrough/timeseries_sort.js
index b8cdd15df0971..3df1f0685abd9 100644
--- a/jstests/noPassthrough/timeseries_sort.js
+++ b/jstests/noPassthrough/timeseries_sort.js
@@ -6,11 +6,8 @@
* requires_sharding,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/analyze_plan.js"); // For getAggPlanStage
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
Random.setRandomSeed();
@@ -26,12 +23,6 @@ const st = new ShardingTest({shards: 2});
const sDB = st.s.getDB(dbName);
assert.commandWorked(sDB.adminCommand({enableSharding: dbName}));
-if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
- jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
- st.stop();
- return;
-}
-
st.ensurePrimaryShard(dbName, st.shard0.shardName);
// Shard time-series collection.
@@ -130,4 +121,3 @@ assertAccessPath([forwardSort], {$natural: -1}, "COLLSCAN", 1);
assertAccessPath([backwardSort], {$natural: 1}, "COLLSCAN", -1);
st.stop();
-})();
diff --git a/jstests/noPassthrough/timeseries_ttl.js b/jstests/noPassthrough/timeseries_ttl.js
index af3101ec6c0a8..5232f5f735b04 100644
--- a/jstests/noPassthrough/timeseries_ttl.js
+++ b/jstests/noPassthrough/timeseries_ttl.js
@@ -8,10 +8,8 @@
* requires_getmore,
* ]
*/
-(function() {
-"use strict";
load("jstests/libs/clustered_collections/clustered_collection_util.js");
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Run TTL monitor constantly to speed up this test.
const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
@@ -155,5 +153,4 @@ testCase((coll, bucketsColl) => {
assert.eq(0, bucketsColl.find().itcount());
})();
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_ttl_index_downgrade.js b/jstests/noPassthrough/timeseries_ttl_index_downgrade.js
deleted file mode 100644
index 3f90074172775..0000000000000
--- a/jstests/noPassthrough/timeseries_ttl_index_downgrade.js
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Tests that the cluster cannot be downgraded when there are secondary TTL indexes with partial
- * filters on time-series present.
- */
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/feature_flag_util.js"); // For isEnabled.
-
-const conn = MongoRunner.runMongod();
-const db = conn.getDB("test");
-
-if (!FeatureFlagUtil.isEnabled(db, "TimeseriesScalabilityImprovements")) {
- jsTestLog(
- "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled.");
- MongoRunner.stopMongod(conn);
- return;
-}
-
-const collName = "timeseries_ttl_index_downgrade";
-const coll = db.getCollection(collName);
-const bucketsColl = db.getCollection("system.buckets." + collName);
-
-const timeFieldName = "tm";
-const metaFieldName = "mm";
-const timeSpec = {
- [timeFieldName]: 1
-};
-
-assert.commandWorked(db.createCollection(
- coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
-
-function checkIndexForDowngrade(isCompatible) {
- if (!isCompatible) {
- assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
- ErrorCodes.CannotDowngrade);
- assert.commandWorked(coll.dropIndexes("*"));
- }
-
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
- assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-}
-
-// Verify that downgrading succeeds on a time-series collection without any indexes.
-checkIndexForDowngrade(true);
-
-// Verify that downgrading succeeds on a time-series collection with a partial index.
-const options = {
- name: "partialIndexOnMeta",
- partialFilterExpression: {[metaFieldName]: {$gt: 5}}
-};
-assert.commandWorked(coll.createIndex(timeSpec, options));
-checkIndexForDowngrade(true);
-
-// Verify that downgrading succeeds on a time-series collection created with expireAfterSeconds
-// value.
-coll.drop();
-assert.commandWorked(db.createCollection(
- coll.getName(),
- {timeseries: {timeField: timeFieldName, metaField: metaFieldName}, expireAfterSeconds: 3600}));
-checkIndexForDowngrade(true);
-
-// Verify that downgrading fails on a time-series collection with a partial, TTL index.
-assert.commandWorked(coll.createIndex(timeSpec, Object.merge(options, {expireAfterSeconds: 400})));
-checkIndexForDowngrade(false);
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/timeseries_update_delete_transaction.js b/jstests/noPassthrough/timeseries_update_delete_transaction.js
index 3bf3a1dd3c492..7b3c50d2d459a 100644
--- a/jstests/noPassthrough/timeseries_update_delete_transaction.js
+++ b/jstests/noPassthrough/timeseries_update_delete_transaction.js
@@ -5,11 +5,6 @@
* requires_replication,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
-
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
@@ -34,9 +29,11 @@ assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.
session.startTransaction();
// Time-series update in a multi-document transaction should fail.
-assert.commandFailedWithCode(sessionColl.update({[metaFieldName]: "a"}, {"$set": {"b": "a"}}),
+assert.commandFailedWithCode(session.getDatabase(jsTestName()).runCommand({
+ update: collectionName,
+ updates: [{q: {[metaFieldName]: "a"}, u: {"$set": {"b": "a"}}, multi: true}],
+}),
ErrorCodes.OperationNotSupportedInTransaction);
assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
session.endSession();
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_update_one_transaction.js b/jstests/noPassthrough/timeseries_update_one_transaction.js
new file mode 100644
index 0000000000000..5c10cf9be7953
--- /dev/null
+++ b/jstests/noPassthrough/timeseries_update_one_transaction.js
@@ -0,0 +1,273 @@
+/**
+ * Tests the updateOne command on time-series collections in multi-document transactions.
+ *
+ * @tags: [
+ * requires_replication,
+ * requires_timeseries,
+ * featureFlagTimeseriesUpdatesSupport,
+ * ]
+ */
+load("jstests/libs/fail_point_util.js");
+load('jstests/libs/parallel_shell_helpers.js');
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const metaFieldName = "mm";
+const timeFieldName = "tt";
+const collectionNamePrefix = "test_coll_";
+let collectionCounter = 0;
+
+const testDB = rst.getPrimary().getDB(jsTestName());
+let testColl = testDB[collectionNamePrefix + collectionCounter];
+assert.commandWorked(testDB.dropDatabase());
+
+const docsPerMetaField = 3;
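+// Recreates the test collection and inserts 'docsPerMetaField' documents for each of the meta
+// values 0, 1 and 2.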
+const initializeData = function() {
+ testColl = testDB[collectionNamePrefix + ++collectionCounter];
+ assert.commandWorked(testDB.createCollection(
+ testColl.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+
+ let docs = [];
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 0});
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 1});
+ docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 2});
+ }
+
+ // Insert test documents.
+ assert.commandWorked(testColl.insertMany(docs));
+};
+
+// 1. Update one document from the collection in a transaction.
+(function basicUpdateOne() {
+ jsTestLog("Running 'basicUpdateOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ session.startTransaction();
+ assert.commandWorked(sessionColl.updateOne({_id: 0, [metaFieldName]: 0}, {$inc: {updated: 1}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect one updated document with updated: 1.
+ assert.eq(testColl.find({updated: 1}).toArray().length, 1);
+})();
+
+// 2. updateOne should not have visible changes when the transaction is aborted.
+(function updateOneTransactionAborts() {
+ jsTestLog("Running 'updateOneTransactionAborts'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ session.startTransaction();
+ assert.commandWorked(sessionColl.updateOne({_id: 0, [metaFieldName]: 1}, {$inc: {updated: 1}}));
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+
+ // The transaction was aborted so no documents should have been updated.
+ assert.eq(testColl.find({updated: 1}).toArray().length, 0);
+})();
+
+// 3. Run a few updateOnes in a single transaction.
+(function multipleUpdateOne() {
+ jsTestLog("Running 'multipleUpdateOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ assert.commandWorked(
+ sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}}));
+ }
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect all documents with {meta: 0} to be updated.
+ assert.eq(testColl.find({[metaFieldName]: 0, updated: 1}).toArray().length, docsPerMetaField);
+})();
+
+// 4. Tests performing updateOnes in and out of a transaction on abort.
+(function mixedUpdateOneAbortTxn() {
+ jsTestLog("Running 'mixedUpdateOneAbortTxn'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ // Update all documents for meta values 0, 1.
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ assert.commandWorked(
+ sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}}));
+ assert.commandWorked(
+ sessionColl.updateOne({_id: i, [metaFieldName]: 1}, {$inc: {updated: 1}}));
+ }
+
+ // Outside of the session and transaction, perform an updateOne.
+ assert.commandWorked(testColl.updateOne({_id: 0, [metaFieldName]: 2}, {$inc: {updated: 1}}));
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+
+ // The aborted transaction should not have updated any documents.
+ assert.eq(testColl.find({[metaFieldName]: 0, updated: 1}).toArray().length, 0);
+ assert.eq(testColl.find({[metaFieldName]: 1, updated: 1}).toArray().length, 0);
+
+ // The update outside of the transaction should have succeeded.
+ assert.eq(testColl.find({[metaFieldName]: 2, updated: 1}).toArray().length, 1);
+})();
+
+// 5. Tests performing updateOnes in and out of a transaction on commit.
+(function mixedUpdateOneCommitTxn() {
+ jsTestLog("Running 'mixedUpdateOneCommitTxn'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ for (let i = 0; i < docsPerMetaField; ++i) {
+ // Within the transaction.
+ assert.commandWorked(
+ sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}}));
+ assert.commandWorked(
+ sessionColl.updateOne({_id: i, [metaFieldName]: 1}, {$inc: {updated: 1}}));
+
+ // Outside of the session and transaction, perform updateOne.
+ assert.commandWorked(
+ testColl.updateOne({_id: i, [metaFieldName]: 2}, {$inc: {updated: 1}}));
+ }
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Expect all documents to have been updated.
+ assert.eq(testColl.find({updated: 1}).toArray().length, 9);
+})();
+
+// 6. Tests a race to update the same document in and out of a transaction.
+(function raceToUpdateOne() {
+ jsTestLog("Running 'raceToUpdateOne'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+ session.startTransaction();
+
+ // Within the transaction, perform an updateOne.
+ const updateFilter = {_id: 1, [metaFieldName]: 0};
+ assert.commandWorked(sessionColl.updateOne(updateFilter, {$set: {_id: 10}}));
+
+ // Note: there is a chance the parallel shell runs after the transaction is committed and that
+ // is fine as both interleavings should succeed.
+ const awaitTestUpdate = startParallelShell(
+ funWithArgs(function(dbName, collName, filter) {
+ const testDB = db.getSiblingDB(dbName);
+ const coll = testDB.getCollection(collName);
+
+ // Outside of the session and transaction, perform updateOne.
+ assert.commandWorked(coll.updateOne(filter, {$set: {_id: 10}}));
+ }, testDB.getName(), testColl.getName(), updateFilter), testDB.getMongo().port);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+ // Allow non-transactional updateOne to finish.
+ awaitTestUpdate();
+ assert.eq(testColl.find({_id: 10}).toArray().length, 1);
+})();
+
+// 7. Tests a transactional updateOne on a document which becomes visible after the transaction
+// starts.
+(function updateOneAndInsertBeforeCommit() {
+ jsTestLog("Running 'updateOneAndInsertBeforeCommit'");
+ initializeData();
+
+ const session = testDB.getMongo().startSession();
+ const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ session.startTransaction();
+ // Ensure the document does not exist within the snapshot of the newly started transaction.
+ assert.eq(sessionColl.find({[metaFieldName]: 101}).toArray().length, 0);
+
+ // Outside of the session and transaction, update document.
+ assert.commandWorked(
+ testColl.updateOne({[metaFieldName]: 0, _id: 0}, {$set: {[metaFieldName]: 101}}));
+
+ // Double check the document is still not visible from within the transaction.
+ assert.eq(sessionColl.find({[metaFieldName]: 101}).toArray().length, 0);
+
+ // Within the transaction, perform updateOne.
+ assert.commandWorked(sessionColl.updateOne({[metaFieldName]: 101}, {$inc: {updated: 1}}));
+ assert.eq(sessionColl.find({updated: 1}).toArray().length, 0);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+
+    // The document modified outside the transaction should not have been updated by the
+    // transactional updateOne, even though the transaction commits after the outside write.
+ assert.eq(testColl.find({updated: 1}).toArray().length, 0);
+})();
+
+// 8. Tests two side-by-side transactional updateOnes on the same document.
+(function updateOneInTwoTransactions() {
+ jsTestLog("Running 'updateOneInTwoTransactions'");
+ initializeData();
+
+ const sessionA = testDB.getMongo().startSession();
+ const sessionB = testDB.getMongo().startSession();
+ const collA = sessionA.getDatabase(jsTestName()).getCollection(testColl.getName());
+ const collB = sessionB.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+ const docToUpdate = {_id: 1, [metaFieldName]: 1};
+
+ // Start transactions on different sessions.
+ sessionA.startTransaction({readConcern: {level: "snapshot"}});
+ sessionB.startTransaction({readConcern: {level: "snapshot"}});
+
+ // Ensure the document exists in the snapshot of both transactions.
+ assert.eq(collA.find(docToUpdate).toArray().length, 1);
+ assert.eq(collB.find(docToUpdate).toArray().length, 1);
+
+ // Perform updateOne on transaction A.
+ assert.commandWorked(collA.updateOne(docToUpdate, {$inc: {updated: 1}}));
+
+ const updateCommand = {
+ update: collB.getName(),
+ updates: [{
+ q: docToUpdate,
+ u: {$inc: {updated: 1}},
+ multi: false,
+ }]
+ };
+
+ // We expect the updateOne on transaction B to fail, causing the transaction to abort.
+ // Sidenote: avoiding the updateOne method from 'crud_api.js' because it throws.
+ assert.commandFailedWithCode(collB.runCommand(updateCommand), ErrorCodes.WriteConflict);
+ assert.commandFailedWithCode(sessionB.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ sessionB.endSession();
+
+ // Ensure the document is updated in the snapshot of transaction A.
+ assert.eq(collA.find({updated: 1}).toArray().length, 1);
+ // Since transaction A has not committed yet, the document should still not be updated outside
+ // of the transaction.
+ assert.eq(testColl.find({updated: 1}).toArray().length, 0);
+
+ // Ensure the document has been successfully updated after transaction A commits.
+ assert.commandWorked(sessionA.commitTransaction_forTesting());
+ assert.eq(testColl.find({updated: 1}).toArray().length, 1);
+
+ sessionA.endSession();
+})();
+
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timeseries_update_oplog.js b/jstests/noPassthrough/timeseries_update_oplog.js
new file mode 100644
index 0000000000000..9b6e8c8c37f67
--- /dev/null
+++ b/jstests/noPassthrough/timeseries_update_oplog.js
@@ -0,0 +1,164 @@
+/**
+ * Tests time-series updates are replicated atomically as applyOps oplog entries that group the
+ * writes together.
+ *
+ * @tags: [
+ * requires_replication,
+ * requires_timeseries,
+ * featureFlagTimeseriesUpdatesSupport,
+ * ]
+ */
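+// For illustration only (the exact field layout is not asserted by this test), a grouped oplog
+// entry has roughly the shape:
+//   {op: "c", ns: "admin.$cmd",
+//    o: {applyOps: [{op: "d", ns: "<db>.system.buckets.<coll>", ...},
+//                   {op: "i", ns: "<db>.system.buckets.<coll>", ...}]}}
+// i.e. all bucket-level writes for one user-level update are applied atomically.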
+(function() {
+'use strict';
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const timeFieldName = 'time';
+const metaFieldName = 'tag';
+const dateTime = ISODate("2023-06-29T16:00:00Z");
+const testDB = primary.getDB("test");
+let collCount = 0;
+
+const initialMeasurement = [
+ {_id: 0, [timeFieldName]: dateTime, [metaFieldName]: 0},
+ {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: 0, a: 1},
+ {_id: 2, [timeFieldName]: dateTime, [metaFieldName]: 0, a: 1},
+ {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 1},
+];
+
+const runTest = function({cmdBuilderFn, validateFn, retryableWrite = false}) {
+ const coll = testDB.getCollection('timeseries_update_oplog' + collCount++);
+ coll.drop();
+ assert.commandWorked(testDB.createCollection(
+ coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
+ assert.commandWorked(coll.insertMany(initialMeasurement));
+
+ let cmdObj = cmdBuilderFn(coll);
+ if (retryableWrite) {
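+        // Attaching a logical session id and a transaction number marks the command as a
+        // retryable write.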
+ const session = primary.startSession({retryWrites: true});
+ cmdObj["lsid"] = session.getSessionId();
+ cmdObj["txnNumber"] = NumberLong(0);
+ assert.commandWorked(session.getDatabase("test").runCommand(cmdObj));
+ } else {
+ assert.commandWorked(testDB.runCommand(cmdObj));
+ }
+
+ validateFn(testDB, coll, retryableWrite);
+};
+
+function partialBucketMultiUpdateBuilderFn(coll) {
+ return {update: coll.getName(), updates: [{q: {a: 1}, u: {$inc: {updated: 1}}, multi: true}]};
+}
+function fullBucketMultiUpdateBuilderFn(coll) {
+ return {
+ update: coll.getName(),
+ updates: [{q: {[metaFieldName]: 0}, u: {$inc: {updated: 1}}, multi: true}]
+ };
+}
+function partialBucketSingletonUpdateBuilderFn(coll) {
+ return {
+ update: coll.getName(),
+ updates: [{q: {[metaFieldName]: 0}, u: {$inc: {updated: 1}}, multi: false}]
+ };
+}
+function fullBucketSingletonUpdateBuilderFn(coll) {
+ return {
+ update: coll.getName(),
+ updates: [{q: {[metaFieldName]: 1}, u: {$inc: {updated: 1}}, multi: false}]
+ };
+}
+function upsertBuilderFn(coll) {
+ return {
+ update: coll.getName(),
+ updates: [{
+ q: {[timeFieldName]: dateTime, [metaFieldName]: 2},
+ u: {$inc: {updated: 1}},
+ multi: false,
+ upsert: true
+ }]
+ };
+}
+
+// Full bucket update's oplog entry is an ApplyOps[delete, insert].
+function fullBucketValidateFn(testDB, coll, retryableWrite) {
+ const opEntries =
+ testDB.getSiblingDB("local")
+ .oplog.rs
+ .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()})
+ .toArray();
+ assert.eq(opEntries.length, 1);
+ const opEntry = opEntries[0];
+ assert.eq(opEntry["o"]["applyOps"].length, 2);
+ assert(opEntry["o"]["applyOps"][0]["op"] == "d");
+ assert(opEntry["o"]["applyOps"][1]["op"] == "i");
+}
+// Partial bucket update's oplog entry is an ApplyOps[update, insert].
+function partialBucketValidateFn(testDB, coll, retryableWrite) {
+ const opEntries =
+ testDB.getSiblingDB("local")
+ .oplog.rs
+ .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()})
+ .toArray();
+ assert.eq(opEntries.length, 1);
+ const opEntry = opEntries[0];
+ assert.eq(opEntry["o"]["applyOps"].length, 2);
+ assert(opEntry["o"]["applyOps"][0]["op"] == "u");
+ assert(opEntry["o"]["applyOps"][1]["op"] == "i");
+}
+// When inserting a new measurement, an Upsert's oplog entry is an ApplyOps[insert] if it's a
+// retryable write. Otherwise, it generates a regular insert oplog entry.
+function upsertValidateFn(testDB, coll, retryableWrite) {
+ const opEntries =
+ testDB.getSiblingDB("local")
+ .oplog.rs
+ .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()})
+ .toArray();
+ if (retryableWrite) {
+ assert.eq(opEntries.length, 1);
+ const opEntry = opEntries[0];
+ assert.eq(opEntry["o"]["applyOps"].length, 1);
+ assert(opEntry["o"]["applyOps"][0]["op"] == "i");
+ } else {
+ assert.eq(opEntries.length, 0);
+ }
+}
+
+(function testPartialBucketMultiUpdate() {
+ runTest({cmdBuilderFn: partialBucketMultiUpdateBuilderFn, validateFn: partialBucketValidateFn});
+})();
+(function testFullBucketMultiUpdate() {
+ runTest({cmdBuilderFn: fullBucketMultiUpdateBuilderFn, validateFn: fullBucketValidateFn});
+})();
+(function testPartialBucketSingletonUpdate() {
+ runTest(
+ {cmdBuilderFn: partialBucketSingletonUpdateBuilderFn, validateFn: partialBucketValidateFn});
+})();
+(function testFullBucketSingletonUpdate() {
+ runTest({cmdBuilderFn: fullBucketSingletonUpdateBuilderFn, validateFn: fullBucketValidateFn});
+})();
+(function testPartialBucketRetryableSingletonUpdate() {
+ runTest({
+ cmdBuilderFn: partialBucketSingletonUpdateBuilderFn,
+ validateFn: partialBucketValidateFn,
+ retryableWrite: true
+ });
+})();
+(function testFullBucketRetryableSingletonUpdate() {
+ runTest({
+ cmdBuilderFn: fullBucketSingletonUpdateBuilderFn,
+ validateFn: fullBucketValidateFn,
+ retryableWrite: true
+ });
+})();
+(function testUpsert() {
+ runTest({cmdBuilderFn: upsertBuilderFn, validateFn: upsertValidateFn});
+})();
+(function testRetryableUpsert() {
+ runTest({cmdBuilderFn: upsertBuilderFn, validateFn: upsertValidateFn, retryableWrite: true});
+})();
+
+rst.stopSet();
+})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timestamp_index_builds.js b/jstests/noPassthrough/timestamp_index_builds.js
index 61299bbaf4ea8..bc7c303dcfb0d 100644
--- a/jstests/noPassthrough/timestamp_index_builds.js
+++ b/jstests/noPassthrough/timestamp_index_builds.js
@@ -65,7 +65,7 @@ nodes.forEach(node => assert.commandWorked(node.adminCommand(
// This test creates indexes with a majority of nodes unavailable for replication, so index build
// commit quorum is disabled.
-assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}, 0));
+assert.commandWorked(coll.createIndexes([{foo: 1}], {}, 0));
rst.awaitReplication();
rst.stopSet(undefined, true);
diff --git a/jstests/noPassthrough/traffic_reading.js b/jstests/noPassthrough/traffic_reading.js
index aa29d360387e8..9b041085fe4f8 100644
--- a/jstests/noPassthrough/traffic_reading.js
+++ b/jstests/noPassthrough/traffic_reading.js
@@ -15,13 +15,13 @@ mkdir(recordingDir);
// Create the options and run mongod
var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
-m = MongoRunner.runMongod(opts);
+let m = MongoRunner.runMongod(opts);
// Get the port of the host
var serverPort = m.port;
// Create necessary users
-adminDB = m.getDB("admin");
+let adminDB = m.getDB("admin");
const testDB = m.getDB("test");
const coll = testDB.getCollection("foo");
adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
diff --git a/jstests/noPassthrough/transaction_api_commit_errors.js b/jstests/noPassthrough/transaction_api_commit_errors.js
new file mode 100644
index 0000000000000..1613599a78b26
--- /dev/null
+++ b/jstests/noPassthrough/transaction_api_commit_errors.js
@@ -0,0 +1,115 @@
+/**
+ * Tests that the transaction API handles commit errors correctly.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/fail_point_util.js");
+
+const kDbName = "testDb";
+const kCollName = "testColl";
+
+function makeSingleInsertTxn(doc) {
+ return [{
+ dbName: kDbName,
+ command: {
+ insert: kCollName,
+ documents: [doc],
+ }
+ }];
+}
+
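+// Runs 'commandInfos' inside an internal transaction driven by the transaction API, using the
+// test-only 'testInternalTransactions' command.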
+function runTxn(conn, commandInfos) {
+ return conn.adminCommand({testInternalTransactions: 1, commandInfos: commandInfos});
+}
+
+const st = new ShardingTest({config: 1, shards: 1});
+const shardPrimary = st.rs0.getPrimary();
+
+// Set up the test collection.
+assert.commandWorked(st.s.getDB(kDbName)[kCollName].insert([{_id: 0}]));
+
+//
+// Error codes where the API should retry and eventually commit the transaction, either by retrying
+// commit until it succeeds or retrying the entire transaction until it succeeds. Fail commands 10
+// times to exhaust internal retries at layers below the transaction API.
+//
+
+// Retryable error. Note this error is not a NotPrimary error so it won't be rewritten by mongos.
+let commitFailPoint =
+ configureFailPoint(shardPrimary,
+ "failCommand",
+ {
+ errorCode: ErrorCodes.ReadConcernMajorityNotAvailableYet,
+ failCommands: ["commitTransaction"],
+ failInternalCommands: true,
+ },
+ {times: 10});
+let res = assert.commandWorked(runTxn(st.s, makeSingleInsertTxn({_id: 1})));
+commitFailPoint.off();
+
+// No command error with a retryable write concern error.
+commitFailPoint = configureFailPoint(
+ shardPrimary,
+ "failCommand",
+ {
+ writeConcernError:
+ {code: NumberInt(ErrorCodes.ReadConcernMajorityNotAvailableYet), errmsg: "foo"},
+ failCommands: ["commitTransaction"],
+ failInternalCommands: true,
+ },
+ {times: 10});
+res = assert.commandWorked(runTxn(st.s, makeSingleInsertTxn({_id: 2})));
+commitFailPoint.off();
+
+//
+// Error codes where the API should not retry.
+//
+
+// Non-retryable, non-transient commit error.
+commitFailPoint = configureFailPoint(shardPrimary,
+ "failCommand",
+ {
+ errorCode: ErrorCodes.InternalError,
+ failCommands: ["commitTransaction"],
+ failInternalCommands: true,
+ },
+ {times: 10});
+res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 3})),
+ ErrorCodes.InternalError);
+commitFailPoint.off();
+
+// No commit error with a non-retryable write concern error.
+commitFailPoint = configureFailPoint(
+ shardPrimary,
+ "failCommand",
+ {
+ writeConcernError: {code: NumberInt(ErrorCodes.InternalError), errmsg: "foo"},
+ failCommands: ["commitTransaction"],
+ failInternalCommands: true,
+ },
+ {times: 10});
+// The internal transaction test command will rethrow a write concern error as a top-level error.
+res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 4})),
+ ErrorCodes.InternalError);
+commitFailPoint.off();
+
+// Non-transient commit error using a code that is normally transient. Note NoSuchTransaction is
+// not transient when accompanied by a write concern error, which is what this is meant to
+// simulate. Also note the failCommand fail point can't take both an error code and a write
+// concern error, so we "cheat" and override the error labels instead.
+commitFailPoint = configureFailPoint(shardPrimary,
+ "failCommand",
+ {
+ errorCode: ErrorCodes.NoSuchTransaction,
+ errorLabels: [],
+ failCommands: ["commitTransaction"],
+ failInternalCommands: true,
+ },
+ {times: 10});
+res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 5})),
+ ErrorCodes.NoSuchTransaction);
+commitFailPoint.off();
+
+st.stop();
+}());
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index 1240cfd1ba349..b2412b935d2d9 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -1,5 +1,4 @@
// @tags: [
-// requires_fcv_70,
// requires_replication,
// requires_sharding,
// ]
diff --git a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
deleted file mode 100644
index 20bf20a55ca21..0000000000000
--- a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Test that write errors in a transaction due to SnapshotUnavailable are labelled
- * TransientTransactionError and the error is reported at the top level, not in a writeErrors array.
- *
- * Other transient transaction errors are tested elsewhere: WriteConflict is tested in
- * transactions_write_conflicts.js, NotWritablePrimary is tested in transient_txn_error_labels.js,
- * and NoSuchTransaction is tested in transient_txn_error_labels_with_write_concern.js.
- *
- * @tags: [uses_transactions]
- */
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-
-const name = "transaction_write_with_snapshot_unavailable";
-const replTest = new ReplSetTest({name: name, nodes: 1});
-replTest.startSet();
-replTest.initiate();
-
-const dbName = name;
-const dbNameB = dbName + "B";
-const collName = "collection";
-const collNameB = collName + "B";
-
-const primary = replTest.getPrimary();
-const primaryDB = primary.getDB(dbName);
-
-if (FeatureFlagUtil.isEnabled(primaryDB, "PointInTimeCatalogLookups")) {
- // With the PointInTimeCatalogLookups feature this test doesn't make sense as the
- // SnapshotUnavailable error will be removed
- replTest.stopSet();
- return;
-}
-
-assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}}));
-
-function testOp(cmd) {
- let op = Object.getOwnPropertyNames(cmd)[0];
- let session = primary.startSession();
- let sessionDB = session.getDatabase(name);
-
- jsTestLog(`Testing that WriteConflict during ${op} is labelled TransientTransactionError`);
-
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]}));
- // Create collection outside transaction, cannot write to it in the transaction
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB}));
-
- let res;
- try {
- res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd);
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
- assert.eq(res.ok, 0);
- assert(!res.hasOwnProperty("writeErrors"));
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- } catch (ex) {
- printjson(cmd);
- printjson(res);
- throw ex;
- }
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand(
- {dropDatabase: 1, writeConcern: {w: "majority"}}));
-}
-
-testOp({insert: collNameB, documents: [{_id: 0}]});
-testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]});
-testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]});
-
-replTest.stopSet();
-})();
diff --git a/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js b/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js
index 2fff0b6b7e0f9..de8fb8513b68a 100644
--- a/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js
+++ b/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js
@@ -1,6 +1,6 @@
/**
- * Verifies the transitionToCatalogShard feature flag guards running the catalog shard transition
- * commands.
+ * Verifies the transitionFromDedicatedConfigServer feature flag guards running the config shard
+ * transition commands.
*
* @tags: [requires_fcv_70]
*/
@@ -21,13 +21,14 @@ const st = new ShardingTest({
});
// None of the transition commands can be run on mongos or the config server.
-assert.commandFailedWithCode(st.s.adminCommand({transitionToCatalogShard: 1}),
+assert.commandFailedWithCode(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}),
ErrorCodes.CommandNotFound);
assert.commandFailedWithCode(st.s.adminCommand({transitionToDedicatedConfigServer: 1}), 7368401);
const configPrimary = st.configRS.getPrimary();
-assert.commandFailedWithCode(configPrimary.adminCommand({_configsvrTransitionToCatalogShard: 1}),
- ErrorCodes.CommandNotFound);
+assert.commandFailedWithCode(
+ configPrimary.adminCommand({_configsvrTransitionFromDedicatedConfigServer: 1}),
+ ErrorCodes.CommandNotFound);
assert.commandFailedWithCode(
configPrimary.adminCommand({_configsvrTransitionToDedicatedConfigServer: 1}), 7368402);
diff --git a/jstests/noPassthrough/ttl_changes_are_immediate.js b/jstests/noPassthrough/ttl_changes_are_immediate.js
index d9c509740861a..ac19e7c3f44c5 100644
--- a/jstests/noPassthrough/ttl_changes_are_immediate.js
+++ b/jstests/noPassthrough/ttl_changes_are_immediate.js
@@ -1,7 +1,5 @@
// Ensure that changes to the TTL sleep time are reflected immediately.
-(function() {
-"use strict";
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
let runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1000"});
let db = runner.getDB("test");
@@ -24,5 +22,4 @@ TTLUtil.waitForPass(coll.getDB(), true, 20 * 1000);
assert.eq(coll.count(), 0, "We should get 0 documents after TTL monitor run");
-MongoRunner.stopMongod(runner);
-})();
+MongoRunner.stopMongod(runner);
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_hidden_index.js b/jstests/noPassthrough/ttl_hidden_index.js
index 7ef7ecef991e0..e359562a4931a 100644
--- a/jstests/noPassthrough/ttl_hidden_index.js
+++ b/jstests/noPassthrough/ttl_hidden_index.js
@@ -1,7 +1,5 @@
// Make sure the TTL index still work after we hide it
-(function() {
-"use strict";
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
let runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
let coll = runner.getDB("test").ttl_hiddenl_index;
@@ -24,5 +22,4 @@ TTLUtil.waitForPass(coll.getDB());
assert.eq(coll.count(), 0, "We should get 0 documents after TTL monitor run");
-MongoRunner.stopMongod(runner);
-})();
+MongoRunner.stopMongod(runner);
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js b/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js
deleted file mode 100644
index 45478301ac682..0000000000000
--- a/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Ensures that the TTLMonitor does not remove the cached index information from the
- * TTLCollectionCache object for a newly created index before the implicitly created collection is
- * registered and visible in the CollectionCatalog.
- * Removing this cached index information prevents the TTLMonitor from removing expired documents
- * for that collection.
- */
-(function() {
-'use strict';
-load("jstests/libs/ttl_util.js");
-
-const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
-
-const dbName = "test";
-const collName = "ttlMonitor";
-
-const db = conn.getDB(dbName);
-const coll = db.getCollection(collName);
-
-TestData.dbName = dbName;
-TestData.collName = collName;
-
-coll.drop();
-
-const failPoint = "hangTTLCollectionCacheAfterRegisteringInfo";
-assert.commandWorked(db.adminCommand({configureFailPoint: failPoint, mode: "alwaysOn"}));
-
-// Create an index on a non-existent collection. This will implicitly create the collection.
-let awaitcreateIndex = startParallelShell(() => {
- const testDB = db.getSiblingDB(TestData.dbName);
- assert.commandWorked(
- testDB.getCollection(TestData.collName).createIndex({x: 1}, {expireAfterSeconds: 0}));
-}, db.getMongo().port);
-
-// Wait for the TTL monitor to run and register the index in the TTL collection cache.
-checkLog.containsJson(db.getMongo(), 4664000);
-
-// Let the TTL monitor run once. It should not remove the index from the cached TTL information
-// until the collection is committed.
-TTLUtil.waitForPass(coll.getDB());
-
-// Finish the index build.
-assert.commandWorked(db.adminCommand({configureFailPoint: failPoint, mode: "off"}));
-awaitcreateIndex();
-
-// Insert documents, which should expire immediately and be removed on the next TTL pass.
-const now = new Date();
-for (let i = 0; i < 10; i++) {
- assert.commandWorked(coll.insert({x: now}));
-}
-
-// Let the TTL monitor run once to remove the expired documents.
-TTLUtil.waitForPass(coll.getDB());
-
-assert.eq(0, coll.find({}).count());
-
-MongoRunner.stopMongod(conn);
-}());
diff --git a/jstests/noPassthrough/ttl_operation_metrics.js b/jstests/noPassthrough/ttl_operation_metrics.js
index 674304619716e..72ea675601efc 100644
--- a/jstests/noPassthrough/ttl_operation_metrics.js
+++ b/jstests/noPassthrough/ttl_operation_metrics.js
@@ -5,12 +5,9 @@
* requires_replication,
* ]
*/
-(function() {
-'use strict';
-
load('jstests/noPassthrough/libs/index_build.js'); // For IndexBuildTest
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
var rst = new ReplSetTest({
nodes: 2,
@@ -115,5 +112,4 @@ assertMetrics(secondary, (metrics) => {
assert.eq(primaryDB[collName].count({}), 1);
assert.eq(secondaryDB[collName].count({}), 1);
-rst.stopSet();
-}());
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js b/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js
index e6e1bfefdf399..12ec14b34db5a 100644
--- a/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js
+++ b/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js
@@ -5,12 +5,9 @@
* requires_replication,
* ]
*/
-(function() {
-'use strict';
-
load('jstests/noPassthrough/libs/index_build.js'); // For IndexBuildTest
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
var rst = new ReplSetTest({
nodes: 2,
@@ -91,10 +88,12 @@ assertMetrics(primary, (metrics) => {
assert.gte(metrics[dbName2].totalUnitsWritten, 2);
});
-// Clear metrics and wait for a TTL pass to delete the documents.
+// Clear metrics and wait for two TTL passes so that the TTL monitor both observes the inserts and
+// deletes the documents.
clearMetrics(primary);
pauseTtl.off();
TTLUtil.waitForPass(primaryDB1);
+TTLUtil.waitForPass(primaryDB1);
// Ensure that the TTL monitor deleted 2 documents on the primary and recorded read and write
// metrics.
@@ -144,5 +143,4 @@ const secondaryDB2 = secondary.getDB(dbName2);
assert.eq(secondaryDB1[collName].count({}), 0);
assert.eq(secondaryDB2[collName].count({}), 2);
-rst.stopSet();
-}());
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index 00125a9bb914a..427e1a4bdcb91 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -1,8 +1,6 @@
// Test that the TTL monitor will correctly use TTL indexes that are also partial indexes.
// SERVER-17984.
-(function() {
-"use strict";
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Launch mongod with shorter TTL monitor sleep interval.
var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
@@ -26,5 +24,4 @@ assert.eq(0,
"Wrong number of documents in partial index, after TTL monitor run");
assert.eq(
1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
-MongoRunner.stopMongod(runner);
-})();
+MongoRunner.stopMongod(runner);
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_resharding_collection.js b/jstests/noPassthrough/ttl_resharding_collection.js
index cafa433fa5f43..12b8019b9aa0b 100644
--- a/jstests/noPassthrough/ttl_resharding_collection.js
+++ b/jstests/noPassthrough/ttl_resharding_collection.js
@@ -1,7 +1,5 @@
// Tests that the TTL Monitor is disabled for .system.resharding.* namespaces.
-(function() {
-"use strict";
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
// Launch mongod with shorter TTL monitor sleep interval.
const runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
@@ -22,5 +20,4 @@ TTLUtil.waitForPass(coll.getDB());
// namespace.
assert.eq(
1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
-MongoRunner.stopMongod(runner);
-})();
+MongoRunner.stopMongod(runner);
\ No newline at end of file
diff --git a/jstests/noPassthrough/ttl_with_dropIndex.js b/jstests/noPassthrough/ttl_with_dropIndex.js
index 6bed0fa6b9151..fdf8f861ae015 100644
--- a/jstests/noPassthrough/ttl_with_dropIndex.js
+++ b/jstests/noPassthrough/ttl_with_dropIndex.js
@@ -1,9 +1,7 @@
/**
* Verify the behavior of dropping TTL index.
*/
-(function() {
-'use strict';
-load("jstests/libs/ttl_util.js");
+import {TTLUtil} from "jstests/libs/ttl_util.js";
let conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'});
let db = conn.getDB('test');
@@ -38,5 +36,4 @@ TTLUtil.waitForPass(db);
assert.eq(coll.find().itcount(), 50);
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/unindex_detects_data_corruption.js b/jstests/noPassthrough/unindex_detects_data_corruption.js
new file mode 100644
index 0000000000000..a475b92846348
--- /dev/null
+++ b/jstests/noPassthrough/unindex_detects_data_corruption.js
@@ -0,0 +1,38 @@
+/**
+ * This tests that errors are logged when unindexing _id finds evidence of corruption, the server
+ * does not crash, and the appropriate error is returned.
+ */
+(function() {
+
+const replSet = new ReplSetTest({nodes: 1});
+replSet.startSet();
+replSet.initiate();
+
+const primary = replSet.getPrimary();
+
+const db = primary.getDB('test');
+const collName = 'coll';
+const coll = db[collName];
+
+assert.commandWorked(coll.insert({a: "first"}));
+
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: "WTIndexUassertDuplicateRecordForKeyOnIdUnindex", mode: "alwaysOn"}));
+
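+// With the fail point enabled, un-indexing the _id entry for the removed document behaves as if
+// multiple records were found for the key, which should surface as DataCorruptionDetected rather
+// than a crash.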
+assert.commandFailedWithCode(coll.remove({a: "first"}), ErrorCodes.DataCorruptionDetected);
+
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: "WTIndexUassertDuplicateRecordForKeyOnIdUnindex", mode: "off"}));
+
+assert.soonNoExcept(() => {
+    // The health log entry is written asynchronously by a background worker; expect it to be
+    // found eventually.
+ let entry = primary.getDB('local').system.healthlog.findOne({severity: 'error'});
+ assert(entry, "No healthlog entry found on " + tojson(primary));
+ assert.eq("Un-index seeing multiple records for key", entry.msg, tojson(entry));
+ assert.eq(1, primary.getDB('local').system.healthlog.count({severity: 'error'}));
+ return true;
+});
+
+replSet.stopSet();
+})();
diff --git a/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js b/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js
new file mode 100644
index 0000000000000..44c9f83101a08
--- /dev/null
+++ b/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js
@@ -0,0 +1,35 @@
+/**
+ * SERVER-75879: Tests that an invalid document with multiple _id fields cannot be inserted by an
+ * update sent with upsert=true.
+ */
+(function() {
+"use strict";
+
+// Run tests on a standalone mongod.
+let conn = MongoRunner.runMongod({setParameter: {enableComputeMode: true}});
+let db = conn.getDB(jsTestName());
+
+// _buildBsonObj is a lightweight BSON builder that lets us construct invalid BSON in the shell.
+let invalidBson = _buildBsonObj("a", 1, "_id", 1, "_id", 2, "_id", 3);
+
+// Assert the BSON is indeed invalid. First, we build a valid one from its JSON string.
+let validBson = JSON.parse(JSON.stringify(invalidBson));
+assert.eq(JSON.stringify(invalidBson), JSON.stringify(validBson));
+assert.gt(Object.bsonsize(invalidBson), Object.bsonsize(validBson));
+assert.neq(bsonWoCompare(invalidBson, validBson), 0);
+
+// Test that a replacement is not permitted
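+// (Error code 2 corresponds to ErrorCodes.BadValue.)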
+assert.throwsWithCode(() => {
+ db.coll.replaceOne({}, invalidBson, {upsert: true});
+}, 2);
+
+// Test that an upsert is not permitted
+assert.writeErrorWithCode(db.coll.update({}, invalidBson, {upsert: true}), ErrorCodes.BadValue);
+
+// Assert that a valid one is actually insertable
+assert.writeOK(db.coll.update({}, validBson, {upsert: true}));
+
+let inserted = db.coll.findOne();
+assert.docEq(inserted, validBson);
+MongoRunner.stopMongod(conn);
+})();
diff --git a/jstests/noPassthrough/validate_adjust_multikey.js b/jstests/noPassthrough/validate_adjust_multikey.js
index ad36ac48f352c..15db61de99afa 100644
--- a/jstests/noPassthrough/validate_adjust_multikey.js
+++ b/jstests/noPassthrough/validate_adjust_multikey.js
@@ -1,8 +1,7 @@
/**
* Tests foreground validation's ability to fix up allowable multikey metadata problems.
*/
-(function() {
-load("jstests/libs/analyze_plan.js"); // For getWinningPlan to analyze explain() output.
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
const conn = MongoRunner.runMongod();
const dbName = jsTestName();
@@ -109,5 +108,4 @@ runTest((coll) => {
assertIndexMultikey(coll, 'a_text', false);
});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/validate_db_metadata_command_whole_db.js b/jstests/noPassthrough/validate_db_metadata_command_whole_db.js
index 01e4e3dfa0a91..17cd38786edbe 100644
--- a/jstests/noPassthrough/validate_db_metadata_command_whole_db.js
+++ b/jstests/noPassthrough/validate_db_metadata_command_whole_db.js
@@ -4,11 +4,7 @@
* requires_sharding,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
const dbName = jsTestName();
const collName = "coll1";
@@ -95,5 +91,4 @@ MongoRunner.stopMongod(conn);
const st = new ShardingTest({shards: 2});
st.shardColl(dbName + "." + collName, {_id: 1}, {_id: 1});
runTest(st.s);
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthrough/validate_db_metadata_limits.js b/jstests/noPassthrough/validate_db_metadata_limits.js
index dce8af08b6f08..13cefab9c18b3 100644
--- a/jstests/noPassthrough/validate_db_metadata_limits.js
+++ b/jstests/noPassthrough/validate_db_metadata_limits.js
@@ -25,4 +25,4 @@ assert(res.apiVersionErrors, res);
assert(res.apiVersionErrors.length < 100, res);
MongoRunner.stopMongod(conn);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/validate_duplicate_record.js b/jstests/noPassthrough/validate_duplicate_record.js
new file mode 100644
index 0000000000000..1d1430e5c9e22
--- /dev/null
+++ b/jstests/noPassthrough/validate_duplicate_record.js
@@ -0,0 +1,33 @@
+/**
+ * Tests that duplicate records for _id index keys are detected by validate.
+ */
+(function() {
+"use strict";
+
+// Disable testing diagnostics (TestingProctor) so we do not hit test-only fasserts.
+TestData.testingDiagnosticsEnabled = false;
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+let coll = primary.getCollection('test.duplicate_record');
+assert.commandWorked(coll.createIndex({x: 1}));
+
+for (let i = 0; i < 5; i++) {
+ assert.commandWorked(coll.insert({x: i}));
+}
+
+function testValidateWithFailpoint(fpName) {
+ assert.commandWorked(primary.adminCommand({configureFailPoint: fpName, mode: "alwaysOn"}));
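+    // While the fail point is active, validate should run to completion but report the collection
+    // as not valid.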
+ let res = assert.commandWorked(coll.validate());
+ assert(!res.valid);
+ assert.commandWorked(primary.adminCommand({configureFailPoint: fpName, mode: "off"}));
+}
+
+// Test duplicate record for index key on _id index.
+testValidateWithFailpoint("WTIndexUassertDuplicateRecordForIdIndex");
+
+rst.stopSet();
+})();
diff --git a/jstests/noPassthrough/validate_memory_limit.js b/jstests/noPassthrough/validate_memory_limit.js
index 63f55a0ed43d8..ff60122fede0f 100644
--- a/jstests/noPassthrough/validate_memory_limit.js
+++ b/jstests/noPassthrough/validate_memory_limit.js
@@ -7,10 +7,7 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/disk/libs/wt_file_helper.js");
+import {getUriForIndex, truncateUriAndRestartMongod} from "jstests/disk/libs/wt_file_helper.js";
const kIndexKeyLength = 1024 * 1024;
@@ -80,5 +77,4 @@ checkValidateLogs();
// Repair, but incompletely if only some inconsistencies are reported.
checkValidateRepair();
-MongoRunner.stopMongod(conn, null, {skipValidation: true});
-})();
\ No newline at end of file
+MongoRunner.stopMongod(conn, null, {skipValidation: true});
\ No newline at end of file
diff --git a/jstests/noPassthrough/validate_multikey_failures.js b/jstests/noPassthrough/validate_multikey_failures.js
index 6d21116c1d0c5..be3d3010b30ca 100644
--- a/jstests/noPassthrough/validate_multikey_failures.js
+++ b/jstests/noPassthrough/validate_multikey_failures.js
@@ -75,4 +75,4 @@ assert(checkLog.checkContainsWithAtLeastCountJson(conn, 7556101, {"indexKey": {"
assert(checkLog.checkContainsWithAtLeastCountJson(conn, 5367500, {"index": "a.b_1"}, 1));
MongoRunner.stopMongod(conn, null, {skipValidation: true});
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/validate_out_of_order.js b/jstests/noPassthrough/validate_out_of_order.js
index 27142a71c5f79..c8c9eaeecbd1f 100644
--- a/jstests/noPassthrough/validate_out_of_order.js
+++ b/jstests/noPassthrough/validate_out_of_order.js
@@ -19,11 +19,11 @@ for (let i = 0; i < 5; i++) {
// Test record store out-of-order detection.
assert.commandWorked(
- primary.adminCommand({configureFailPoint: "WTRecordStoreUassertOutOfOrder", mode: "alwaysOn"}));
+ primary.adminCommand({configureFailPoint: "failRecordStoreTraversal", mode: "alwaysOn"}));
let res = assert.commandWorked(coll.validate());
assert(!res.valid);
assert.commandWorked(
- primary.adminCommand({configureFailPoint: "WTRecordStoreUassertOutOfOrder", mode: "off"}));
+ primary.adminCommand({configureFailPoint: "failRecordStoreTraversal", mode: "off"}));
// Test index entry out-of-order detection.
assert.commandWorked(
diff --git a/jstests/noPassthrough/validate_timeseries_bucket_reopening.js b/jstests/noPassthrough/validate_timeseries_bucket_reopening.js
index 9936c12aee79c..79f47ebd72bc3 100644
--- a/jstests/noPassthrough/validate_timeseries_bucket_reopening.js
+++ b/jstests/noPassthrough/validate_timeseries_bucket_reopening.js
@@ -7,10 +7,7 @@
*
* @tags: [requires_replication]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'.
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
load("jstests/libs/fail_point_util.js");
const rst = new ReplSetTest({nodes: 1});
@@ -24,7 +21,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) {
rst.stopSet();
jsTestLog(
'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.');
- return;
+ quit();
}
const collNamePrefix = db.validate_timeseries_bucket_reopening_;
@@ -110,4 +107,3 @@ validateBucketReopening(metaFieldName2);
fpSameStripe.off();
rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/validate_with_long_index_name.js b/jstests/noPassthrough/validate_with_long_index_name.js
index 5c97e37133663..8bebd3e07ed6c 100644
--- a/jstests/noPassthrough/validate_with_long_index_name.js
+++ b/jstests/noPassthrough/validate_with_long_index_name.js
@@ -7,10 +7,11 @@
* requires_wiredtiger,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/disk/libs/wt_file_helper.js");
+import {
+ getUriForColl,
+ getUriForIndex,
+ truncateUriAndRestartMongod
+} from "jstests/disk/libs/wt_file_helper.js";
// 64 * 1024 * 1024 = 64MB worth of index names ensures that we test against the maximum BSONObj
// size limit.
@@ -63,5 +64,4 @@ assert(!res.valid);
assert.contains(extraIndexEntries, res.warnings);
assert.contains(extraSizeLimitations, res.errors);
-MongoRunner.stopMongod(conn, null, {skipValidation: true});
-})();
\ No newline at end of file
+MongoRunner.stopMongod(conn, null, {skipValidation: true});
\ No newline at end of file
diff --git a/jstests/noPassthrough/views_count_distinct_disk_use.js b/jstests/noPassthrough/views_count_distinct_disk_use.js
index cd8d3046637c2..d90bfefdaff40 100644
--- a/jstests/noPassthrough/views_count_distinct_disk_use.js
+++ b/jstests/noPassthrough/views_count_distinct_disk_use.js
@@ -1,10 +1,7 @@
// Test count and distinct on views use with different values of the allowDiskUseByDefault
// parameter.
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
@@ -45,5 +42,4 @@ if (!checkSBEEnabled(viewsDB)) {
// stage needs to spill to disk if the memory limit is reached.
testDiskUse({distinct: "largeView", key: "largeStr"});
-MongoRunner.stopMongod(conn);
-})();
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/vote_abort_index_build.js b/jstests/noPassthrough/vote_abort_index_build.js
index c1e8e65f3606d..91418d37da6c6 100644
--- a/jstests/noPassthrough/vote_abort_index_build.js
+++ b/jstests/noPassthrough/vote_abort_index_build.js
@@ -2,7 +2,7 @@
* Tests the 'voteAbortIndexBuild' internal command.
*
* @tags: [
- * featureFlagIndexBuildGracefulErrorHandling,
+ * requires_fcv_71,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/write_conflict_wildcard.js b/jstests/noPassthrough/write_conflict_wildcard.js
index 6d221a770e629..3ade58ecaaed3 100644
--- a/jstests/noPassthrough/write_conflict_wildcard.js
+++ b/jstests/noPassthrough/write_conflict_wildcard.js
@@ -5,10 +5,7 @@
* TODO SERVER-56443: This test is specific to the classic engine. If/when the classic engine is
* deleted, this test should be removed as well.
*/
-(function() {
-"strict";
-
-load("jstests/libs/sbe_util.js");
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const conn = MongoRunner.runMongod();
const testDB = conn.getDB("test");
@@ -16,7 +13,7 @@ const testDB = conn.getDB("test");
if (checkSBEEnabled(testDB)) {
jsTestLog("Skipping test as SBE is not resilient to WCEs");
MongoRunner.stopMongod(conn);
- return;
+ quit();
}
const coll = testDB.write_conflict_wildcard;
@@ -45,4 +42,3 @@ for (let i = 0; i < 1000; ++i) {
assert.commandWorked(
testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index a6fe673a37622..7194e9aed97eb 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -2,7 +2,7 @@
assert(db.getName() == "test");
-t = db.bg1;
+let t = db.bg1;
t.drop();
var a = new Mongo(db.getMongo().host).getDB(db.getName());
@@ -18,7 +18,7 @@ for (var i = 0; i < 100000; i++) {
}
// start bg indexing
-a.bg1.createIndex({i: 1}, {name: "i_1", background: true});
+a.bg1.createIndex({i: 1}, {name: "i_1"});
// add more data
bulk = t.initializeUnorderedBulkOp();
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 221c04a2fc527..12216ca03e36d 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -1,7 +1,7 @@
// btreedel.js
// @tags: [SERVER-32869]
-t = db.foo;
+let t = db.foo;
t.remove({});
var bulk = t.initializeUnorderedBulkOp();
@@ -30,8 +30,8 @@ t.remove({_id: {$gt: 200000, $lt: 600000}});
print("3");
print(d.hasNext());
-n = 0;
-last = {};
+let n = 0;
+let last = {};
printjson(c.next());
while (c.hasNext()) {
n++;
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index f948614f24e87..d03a5d8fb4cc6 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -1,20 +1,20 @@
// @tags: [requires_capped]
-t = db.jstests_capped4;
+let t = db.jstests_capped4;
t.drop();
db.createCollection("jstests_capped4", {size: 1000, capped: true});
t.createIndex({i: 1});
-for (i = 0; i < 20; ++i) {
+for (let i = 0; i < 20; ++i) {
t.save({i: i});
}
-c = t.find().sort({$natural: -1}).limit(2);
+let c = t.find().sort({$natural: -1}).limit(2);
c.next();
c.next();
-d = t.find().sort({i: -1}).limit(2);
+let d = t.find().sort({i: -1}).limit(2);
d.next();
d.next();
-for (i = 20; t.findOne({i: 19}); ++i) {
+for (var i = 20; t.findOne({i: 19}); ++i) {
t.save({i: i});
}
// assert( !t.findOne( { i : 19 } ), "A" );
diff --git a/jstests/noPassthroughWithMongod/capped6.js b/jstests/noPassthroughWithMongod/capped6.js
index ad94043158bbb..70fb9dfce3048 100644
--- a/jstests/noPassthroughWithMongod/capped6.js
+++ b/jstests/noPassthroughWithMongod/capped6.js
@@ -18,7 +18,7 @@ var maxDocuments = Random.randInt(400) + 100;
* check is performed in both forward and reverse directions.
*/
function checkOrder(i, valueArray) {
- res = coll.find().sort({$natural: -1});
+ let res = coll.find().sort({$natural: -1});
assert(res.hasNext(), "A");
var j = i;
while (res.hasNext()) {
@@ -41,7 +41,7 @@ function prepareCollection(shouldReverse) {
assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000}));
var valueArray = new Array(maxDocuments);
var c = "";
- for (i = 0; i < maxDocuments; ++i, c += "-") {
+ for (let i = 0; i < maxDocuments; ++i, c += "-") {
// The a values are strings of increasing length.
valueArray[i] = {a: c};
}
@@ -67,7 +67,7 @@ function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) {
for (var i = valueArrayCurIndex; i < maxDocuments; ++i) {
assert.commandWorked(coll.insert(valueArray[i]));
}
- count = coll.count();
+ let count = coll.count();
// The index corresponding to the last document in the collection.
valueArrayCurIndex = maxDocuments - 1;
diff --git a/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js b/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js
index b1deedfa1d01f..d9dc81356a336 100644
--- a/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js
+++ b/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js
@@ -41,4 +41,4 @@ assert.eq(toColl.count(), 1);
// Interrupt the sleep command.
assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID));
sleepCommand();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/collMod_no_conflicts.js b/jstests/noPassthroughWithMongod/collMod_no_conflicts.js
index 130582d2f61c5..fe8b4edcad80d 100644
--- a/jstests/noPassthroughWithMongod/collMod_no_conflicts.js
+++ b/jstests/noPassthroughWithMongod/collMod_no_conflicts.js
@@ -39,4 +39,4 @@ assert.eq(res[0].options.pipeline, collModPipeline);
// Interrupt the sleep command.
assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID));
sleepCommand();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
index 1474626e757d2..30bf982dd20e2 100644
--- a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
+++ b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js
@@ -26,10 +26,7 @@ try {
awaitParallelShell = startParallelShell(() => {
db.getSiblingDB("test").runCommand({
createIndexes: "collstats_show_ready_and_in_progress_indexes",
- indexes: [
- {key: {a: 1}, name: 'a_1', background: true},
- {key: {b: 1}, name: 'b_1', background: true}
- ]
+ indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]
});
}, db.getMongo().port);
diff --git a/jstests/noPassthroughWithMongod/column_scan_explain.js b/jstests/noPassthroughWithMongod/column_scan_explain.js
index 43b7ee11a0068..5819fd39004f6 100644
--- a/jstests/noPassthroughWithMongod/column_scan_explain.js
+++ b/jstests/noPassthroughWithMongod/column_scan_explain.js
@@ -5,15 +5,13 @@
* featureFlagColumnstoreIndexes,
* ]
*/
-(function() {
-"use strict";
-
+import {getPlanStages} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js";
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq
-load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages.
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.column_scan_explain;
@@ -268,4 +266,3 @@ assert.commandWorked(coll.insertMany(docs, {ordered: false}));
["stage", "planNodeId"]),
`Mismatching column scan plan stage ${tojson(columnScanPlanStages[0])}`);
}());
-}());
diff --git a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
index dd671fa72d0d1..3b4b24f1b54cd 100644
--- a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
+++ b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
@@ -6,15 +6,12 @@
* featureFlagColumnstoreIndexes,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For "planHasStage."
-load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest."
+import {planHasStage} from "jstests/libs/analyze_plan.js";
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js";
load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For "setParameter."
if (!setUpServerForColumnStoreIndexTest(db)) {
- return;
+ quit();
}
const coll = db.columnstore_planning_heuristics;
@@ -76,4 +73,3 @@ assertColumnScanUsed({}, false, "none");
const explain = coll.find({}, {_id: 0, a: 1}).hint({"$**": "columnstore"}).explain();
assert(planHasStage(db, explain, "COLUMN_SCAN"),
`Hint should have overridden heuristics to use column scan: ${tojson(explain)}`);
-})();
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 3a4e48d9dbc59..c89449da23b4b 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -23,7 +23,7 @@ commands.push({
commands.push({
req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.getIndexes().length, 1);
},
confirmFunc: function() {
@@ -41,7 +41,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -59,7 +59,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -77,7 +77,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -90,7 +90,7 @@ commands.push({
commands.push({
req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
+ assert.commandWorked(coll.insert({_id: 1, type: 'oak'}));
assert.eq(coll.count({type: 'willow'}), 0);
},
confirmFunc: function() {
@@ -101,8 +101,8 @@ commands.push({
commands.push({
req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- coll.insert({_id: 2, type: 'maple'});
+ assert.commandWorked(coll.insert({_id: 1, type: 'oak'}));
+ assert.commandWorked(coll.insert({_id: 2, type: 'maple'}));
},
confirmFunc: function() {
assert.eq(db.foo.count({type: 'oak'}), 1);
@@ -120,15 +120,24 @@ commands.push({
});
},
reduce: function(key, values) {
- return {count: values.length};
+            // We may be re-reducing values that have already been partially reduced. In that
+            // case, we expect to see an object like {count: <count>} in the array of input
+            // values.
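+            // A sketch of the arithmetic (assuming map emits one scalar per tag occurrence):
+            // values = [1, 1, {count: 2}] re-reduces to {count: 4}.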
+ const numValues = values.reduce(function(acc, currentValue) {
+ if (typeof currentValue === "object") {
+ return acc + currentValue.count;
+ } else {
+ return acc + 1;
+ }
+ }, 0);
+ return {count: numValues};
},
out: "foo"
},
setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ assert.commandWorked(coll.insert({x: 1, tags: ["a", "b"]}));
+ assert.commandWorked(coll.insert({x: 2, tags: ["b", "c"]}));
+ assert.commandWorked(coll.insert({x: 3, tags: ["c", "a"]}));
+ assert.commandWorked(coll.insert({x: 4, tags: ["b", "c"]}));
},
confirmFunc: function() {
assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
diff --git a/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js b/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js
index f037b0075ee0f..1d97d5d595467 100644
--- a/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js
+++ b/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js
@@ -35,4 +35,4 @@ assert(testColl.isCapped());
// Interrupt the sleep command.
assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID));
sleepCommand();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js b/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js
index b80ce4dcc0f74..4467e6afe10be 100644
--- a/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js
+++ b/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js
@@ -82,4 +82,4 @@ while (cursorLt10Minutes.hasNext()) {
cursorLt10Minutes.next();
}
assert.eq(getNumCursorsLessThan10Minutes() - initialNumCursorsLt10m, 1);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index dd62117cf9f22..7dd8fa072b7dd 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -2,15 +2,15 @@
(function() {
var t = db.duplIndexTest;
t.drop();
-docs = [];
+let docs = [];
for (var i = 0; i < 10000; i++) {
docs.push({name: "foo", z: {a: 17, b: 4}, i: i});
}
assert.commandWorked(t.insert(docs));
-var cmd = "assert.commandWorked(db.duplIndexTest.createIndex( { i : 1 }, {background:true} ));";
+var cmd = "assert.commandWorked(db.duplIndexTest.createIndex( { i : 1 } ));";
var join1 = startParallelShell(cmd);
var join2 = startParallelShell(cmd);
-assert.commandWorked(t.createIndex({i: 1}, {background: true}));
+assert.commandWorked(t.createIndex({i: 1}));
assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
assert.commandWorked(t.dropIndex({i: 1}));
assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
diff --git a/jstests/noPassthroughWithMongod/explain1.js b/jstests/noPassthroughWithMongod/explain1.js
index 1156a5b86edb6..2fedbc6bd05ca 100644
--- a/jstests/noPassthroughWithMongod/explain1.js
+++ b/jstests/noPassthroughWithMongod/explain1.js
@@ -1,10 +1,10 @@
// SERVER-2662 - drop client cursor in a context where query will yield frequently
-t = db.jstests_slowNightly_explain1;
+let t = db.jstests_slowNightly_explain1;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell(function() {
+let s1 = startParallelShell(function() {
t = db.jstests_slowNightly_explain1;
for (var i = 0; i < 80; ++i) {
t.drop();
@@ -17,11 +17,11 @@ s1 = startParallelShell(function() {
});
// Query repeatedly.
-s2 = startParallelShell(function() {
+let s2 = startParallelShell(function() {
t = db.jstests_slowNightly_explain1;
for (var i = 0; i < 500; ++i) {
try {
- z = t.find({x: {$gt: 0}, y: 1}).explain();
+ let z = t.find({x: {$gt: 0}, y: 1}).explain();
t.count({x: {$gt: 0}, y: 1});
} catch (e) {
}
@@ -29,7 +29,7 @@ s2 = startParallelShell(function() {
});
// Put pressure on s2 to yield more often.
-s3 = startParallelShell(function() {
+let s3 = startParallelShell(function() {
t = db.jstests_slowNightly_explain1;
for (var i = 0; i < 200; ++i) {
t.validate({scandata: true});
diff --git a/jstests/noPassthroughWithMongod/explain2.js b/jstests/noPassthroughWithMongod/explain2.js
index 0720091e4a442..d3ab85f262cc4 100644
--- a/jstests/noPassthroughWithMongod/explain2.js
+++ b/jstests/noPassthroughWithMongod/explain2.js
@@ -1,18 +1,19 @@
// Test for race condition SERVER-2807. One cursor is dropped and another is not.
// @tags: [requires_capped]
-collName = 'jstests_slowNightly_explain2';
+let collName = 'jstests_slowNightly_explain2';
-t = db[collName];
+let t = db[collName];
t.drop();
db.createCollection(collName, {capped: true, size: 100000});
t = db[collName];
t.createIndex({x: 1});
-a = startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }');
+let a =
+ startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }');
-for (i = 0; i < 800; ++i) {
+for (let i = 0; i < 800; ++i) {
t.find({x: {$gt: -1}, y: 1}).sort({x: -1}).explain();
}
diff --git a/jstests/noPassthroughWithMongod/explain3.js b/jstests/noPassthroughWithMongod/explain3.js
index 6d35949d273d8..81e87c9e184da 100644
--- a/jstests/noPassthroughWithMongod/explain3.js
+++ b/jstests/noPassthroughWithMongod/explain3.js
@@ -1,10 +1,10 @@
// SERVER-2810 - similar to explain1 test, but with a scan and order find
-t = db.jstests_slowNightly_explain3;
+let t = db.jstests_slowNightly_explain3;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell(function() {
+let s1 = startParallelShell(function() {
t = db.jstests_slowNightly_explain3;
for (var i = 0; i < 80; ++i) {
t.drop();
@@ -17,18 +17,18 @@ s1 = startParallelShell(function() {
});
// Query repeatedly.
-s2 = startParallelShell(function() {
+let s2 = startParallelShell(function() {
t = db.jstests_slowNightly_explain3;
for (var i = 0; i < 500; ++i) {
try {
- z = t.find({x: {$gt: 0}, y: 1}).sort({x: 1}).explain();
+ let z = t.find({x: {$gt: 0}, y: 1}).sort({x: 1}).explain();
} catch (e) {
}
}
});
// Put pressure on s2 to yield more often.
-s3 = startParallelShell(function() {
+let s3 = startParallelShell(function() {
t = db.jstests_slowNightly_explain3;
for (var i = 0; i < 200; ++i) {
t.validate({scandata: true});
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index 583aff8601bd0..1d11dc7219704 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -2,7 +2,7 @@
var t = db.external_sort_text_agg;
t.drop();
t.createIndex({text: "text"});
-for (i = 0; i < 100; i++) {
+for (let i = 0; i < 100; i++) {
t.insert({_id: i, text: Array(210000).join("asdf ")});
// string over 1MB to hit the 100MB threshold for external sort
}
diff --git a/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js b/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js
index 4c9e83705583c..fdd202f506580 100644
--- a/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js
+++ b/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js
@@ -47,4 +47,4 @@ assert.eq(testDB[collName].find(updateDoc).toArray().length, 1);
// Interrupt the sleep command.
assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID));
sleepCommand();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 877954b0f2aa5..f7d92cd5ea71e 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -1,21 +1,21 @@
// Axis aligned circles - hard-to-find precision errors possible with exact distances here
-t = db.axisaligned;
+let t = db.axisaligned;
t.drop();
-scale = [1, 10, 1000, 10000];
-bits = [2, 3, 4, 5, 6, 7, 8, 9];
-radius = [0.0001, 0.001, 0.01, 0.1];
-center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]];
+let scale = [1, 10, 1000, 10000];
+let bits = [2, 3, 4, 5, 6, 7, 8, 9];
+let radius = [0.0001, 0.001, 0.01, 0.1];
+let center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]];
-bound = [];
+let bound = [];
for (var j = 0; j < center.length; j++)
bound.push([-180, 180]);
// Scale all our values to test different sizes
-radii = [];
-centers = [];
-bounds = [];
+let radii = [];
+let centers = [];
+let bounds = [];
for (var s = 0; s < scale.length; s++) {
for (var i = 0; i < radius.length; i++) {
@@ -70,13 +70,13 @@ for (var b = 0; b < bits.length; b++) {
continue;
print("DOING WITHIN QUERY ");
- r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}});
+ let r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}});
assert.eq(5, r.count());
// FIXME: surely code like this belongs in utils.js.
- a = r.toArray();
- x = [];
+ let a = r.toArray();
+ let x = [];
for (k in a)
x.push(a[k]["_id"]);
x.sort();
diff --git a/jstests/noPassthroughWithMongod/geo_near_random2.js b/jstests/noPassthroughWithMongod/geo_near_random2.js
index 2fafb7d4c80e4..81be89c1e103c 100644
--- a/jstests/noPassthroughWithMongod/geo_near_random2.js
+++ b/jstests/noPassthroughWithMongod/geo_near_random2.js
@@ -5,10 +5,7 @@ var test = new GeoNearRandomTest("nightly.geo_near_random2");
test.insertPts(10000);
-opts = {
- sphere: 0,
- nToTest: test.nPts * 0.01
-};
+let opts = {sphere: 0, nToTest: test.nPts * 0.01};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 28110591d64fa..2b67642daabbd 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -2,14 +2,14 @@
// @tags: [SERVER-40561]
//
-t = db.geo_polygon4;
+let t = db.geo_polygon4;
t.drop();
-num = 0;
+let num = 0;
var bulk = t.initializeUnorderedBulkOp();
-for (x = -180; x < 180; x += .5) {
- for (y = -180; y < 180; y += .5) {
- o = {_id: num++, loc: [x, y]};
+for (let x = -180; x < 180; x += .5) {
+ for (let y = -180; y < 180; y += .5) {
+ let o = {_id: num++, loc: [x, y]};
bulk.insert(o);
}
}
diff --git a/jstests/noPassthroughWithMongod/group_pushdown.js b/jstests/noPassthroughWithMongod/group_pushdown.js
index 060e2f47fb129..e8584aa2f7f9b 100644
--- a/jstests/noPassthroughWithMongod/group_pushdown.js
+++ b/jstests/noPassthroughWithMongod/group_pushdown.js
@@ -1,15 +1,12 @@
/**
* Tests basic functionality of pushing $group into the find layer.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
- return;
+ quit();
}
// Ensure group pushdown is enabled and capture the original value of
@@ -748,4 +745,3 @@ assert.commandWorked(db.adminCommand(
coll.aggregate([{$group: {_id: "$$REMOVE", o: {$first: "$non_existent_field"}}}]).toArray(),
[{_id: null, o: null}]);
})();
-})();
diff --git a/jstests/noPassthroughWithMongod/huge_multikey_index.js b/jstests/noPassthroughWithMongod/huge_multikey_index.js
index f7b703f0d9edb..d4d3e03acd4d4 100644
--- a/jstests/noPassthroughWithMongod/huge_multikey_index.js
+++ b/jstests/noPassthroughWithMongod/huge_multikey_index.js
@@ -1,11 +1,11 @@
// https://jira.mongodb.org/browse/SERVER-4534
// Building an index in the foreground on a field with a large array and few documents in
// the collection used to open too many files and crash the server.
-t = db.huge_multikey_index;
+let t = db.huge_multikey_index;
t.drop();
function doit() {
- arr = [];
+ let arr = [];
for (var i = 0; i < 1000 * 1000; i++)
arr.push(i);
diff --git a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
index 616ddcf2a9377..2b2a55ec06120 100644
--- a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
+++ b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
@@ -3,15 +3,12 @@
//
// We issue 'setParameter' command which is not compatible with stepdowns.
// @tags: [does_not_support_stepdowns]
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js"); // For explain helpers.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTest.log("Skipping test because SBE is not enabled");
- return;
+ quit();
}
const coll = db.index_bounds_static_limit;
@@ -69,4 +66,3 @@ try {
} finally {
setStaticLimit(staticLimit);
}
-})();
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index f355ace6bb86f..4644517980bf4 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -3,7 +3,7 @@
Random.setRandomSeed();
-t = db.test_index_check10;
+let t = db.test_index_check10;
function doIt() {
t.drop();
@@ -17,7 +17,7 @@ function doIt() {
}
var fields = ['a', 'b', 'c', 'd', 'e'];
- n = Random.randInt(5) + 1;
+ let n = Random.randInt(5) + 1;
var idx = sort();
var chars = "abcdefghijklmnopqrstuvwxyz";
@@ -32,7 +32,7 @@ function doIt() {
function r() {
var len = Random.randInt(700 / n);
- buf = "";
+ let buf = "";
for (var i = 0; i < len; ++i) {
buf += chars.charAt(Random.randInt(chars.length));
}
@@ -73,9 +73,9 @@ function doIt() {
}
}
s = sort();
- c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
+ let c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
try {
- c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ var c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
} catch (e) {
// may assert if too much data for in memory sort
print("retrying check...");
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index 3e082cb1a3f98..6adad9b2b0909 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -2,7 +2,7 @@
Random.setRandomSeed();
-t = db.test_index_check9;
+let t = db.test_index_check9;
function doIt() {
t.drop();
@@ -16,7 +16,7 @@ function doIt() {
}
var fields = ['a', 'b', 'c', 'd', 'e'];
- n = Random.randInt(5) + 1;
+ let n = Random.randInt(5) + 1;
var idx = sort();
var chars = "abcdefghijklmnopqrstuvwxyz";
@@ -40,7 +40,7 @@ function doIt() {
return Random.randInt(10);
} else {
var len = Random.randInt(10);
- buf = "";
+ let buf = "";
for (var i = 0; i < len; ++i) {
buf += chars.charAt(Random.randInt(chars.length));
}
@@ -95,9 +95,9 @@ function doIt() {
}
}
s = sort();
- c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
- c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
- count = t.count(spec);
+ let c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
+ let c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ let count = t.count(spec);
assert.eq(c1, c2);
assert.eq(c2.length, count);
}
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 9d9c9361f192a..d58a2b5bcbf56 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -75,7 +75,7 @@ for (var idx = 0; idx < dropAction.length; idx++) {
assert.commandWorked(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
- primaryDB.getCollection(collection).createIndex({i: 1}, {background: true});
+ primaryDB.getCollection(collection).createIndex({i: 1});
assert.eq(2, primaryDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js
index bb39282871f13..6c333724b957f 100644
--- a/jstests/noPassthroughWithMongod/logpath.js
+++ b/jstests/noPassthroughWithMongod/logpath.js
@@ -23,7 +23,7 @@ assert(mkdir(testdir));
var cleanupFiles = function() {
var files = listFiles(logdir);
- for (f in files) {
+ for (let f in files) {
var name = files[f].name;
// mostly here for safety
@@ -38,7 +38,7 @@ var logCount = function(fpattern, prefix) {
var pat = RegExp(fpattern + (prefix ? "" : "$"));
var cnt = 0;
- for (f in files) {
+ for (let f in files) {
if (pat.test(files[f].name)) {
cnt++;
}
diff --git a/jstests/noPassthroughWithMongod/lookup_match_pushdown.js b/jstests/noPassthroughWithMongod/lookup_match_pushdown.js
index 9f9c163e98146..731c5e3bdbeea 100644
--- a/jstests/noPassthroughWithMongod/lookup_match_pushdown.js
+++ b/jstests/noPassthroughWithMongod/lookup_match_pushdown.js
@@ -1,11 +1,8 @@
/**
* Tests that the $match stage is pushed before $lookup stage.
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load('jstests/libs/analyze_plan.js'); // For getWinningPlan().
+import {getWinningPlan} from "jstests/libs/analyze_plan.js";
const coll = db.lookup_match_pushdown;
coll.drop();
@@ -76,5 +73,4 @@ const pipelineExprGt = [
{$unwind: "$a"},
{$match: {"a.z": 10, $expr: {$gt: ["$x", 5]}}}
];
-checkPipelineAndResults(pipelineExprGt, expectedPipeline, expectedResultsGt);
-}());
+checkPipelineAndResults(pipelineExprGt, expectedPipeline, expectedResultsGt);
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/lookup_with_limit.js b/jstests/noPassthroughWithMongod/lookup_with_limit.js
index ba5c3ae9529e9..08815adff0e16 100644
--- a/jstests/noPassthroughWithMongod/lookup_with_limit.js
+++ b/jstests/noPassthroughWithMongod/lookup_with_limit.js
@@ -1,15 +1,12 @@
/**
* Tests that the $limit stage is pushed before $lookup stages, except when there is an $unwind.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getWinningPlan().
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {flattenQueryPlanTree, getWinningPlan} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE $lookup is not enabled.");
- return;
+ quit();
}
const coll = db.lookup_with_limit;
@@ -88,4 +85,3 @@ pipeline = [
];
checkResults(pipeline, false, ["COLLSCAN", "EQ_LOOKUP", "$unwind", "$sort", "$limit"]);
checkResults(pipeline, true, ["COLLSCAN", "$lookup", "$sort"]);
-}());
diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js
index e632e5fc1b6c1..dbfe6a2d27563 100644
--- a/jstests/noPassthroughWithMongod/ne_array_indexability.js
+++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js
@@ -1,9 +1,8 @@
/**
* Test that $ne: [] queries are cached correctly. See SERVER-39764.
*/
-(function() {
-load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const coll = db.ne_array_indexability;
coll.drop();
@@ -48,4 +47,3 @@ runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}});
assert.commandWorked(coll.runCommand('planCacheClear'));
runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}});
-})();
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 7f47f8db95d76..d793bab6c1cab 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -1,11 +1,7 @@
// Tests whether the noBalance flag disables balancing for collections
// @tags: [requires_sharding]
-(function() {
-"use strict";
-
load("jstests/sharding/libs/find_chunks_util.js");
-load("jstests/libs/feature_flag_util.js");
const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
@@ -96,5 +92,4 @@ if (lastMigration == null) {
assert.eq(lastMigration.time, sh._lastMigration(collB).time);
}
-st.stop();
-}());
+st.stop();
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/now_variable.js b/jstests/noPassthroughWithMongod/now_variable.js
index c20558102fb9b..a220aa3218122 100644
--- a/jstests/noPassthroughWithMongod/now_variable.js
+++ b/jstests/noPassthroughWithMongod/now_variable.js
@@ -1,10 +1,7 @@
/**
* Tests for the $$NOW and $$CLUSTER_TIME system variable.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
const coll = db[jsTest.name()];
@@ -194,4 +191,3 @@ assert.eq(0, futureColl.find({$expr: {$lt: ["$timeField", "$$NOW"]}}).itcount())
assert.soon(() => {
return futureColl.find({$expr: {$lt: ["$timeField", "$$NOW"]}}).itcount() == 1;
}, "$$NOW should catch up after 3 seconds");
-}());
diff --git a/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js b/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js
new file mode 100644
index 0000000000000..32985d6006256
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js
@@ -0,0 +1,123 @@
+/**
+ * Verifies that $or queries on clustered collections that have plans with IXSCAN and
+ * CLUSTERED_IXSCAN stages do not use the SBE plan cache.
+ */
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+
+const mongod = MongoRunner.runMongod();
+const dbName = "test";
+const db = mongod.getDB(dbName);
+const coll = db.or_use_clustered_collection;
+assertDropCollection(db, coll.getName());
+
+// Create a clustered collection and create indexes.
+assert.commandWorked(
+ db.createCollection(coll.getName(), {clusteredIndex: {key: {_id: 1}, unique: true}}));
+assert.commandWorked(coll.createIndex({a: 1}));
+
+// Insert documents, and store them to be used later in the test.
+const docs = [];
+const numDocs = 10;
+for (let i = 0; i < numDocs; i++) {
+ docs.push({a: i, _id: i, noIndex: i});
+}
+assert.commandWorked(coll.insertMany(docs));
+
+function assertCorrectResults({query, expectedDocIds}) {
+ let results = query.toArray();
+ let expectedResults = [];
+ expectedDocIds.forEach(id => expectedResults.push(docs[id]));
+ assert.sameMembers(results, expectedResults);
+}
+
+function validatePlanCacheEntries({increment, query, expectedDocIds}) {
+ const oldSize = coll.getPlanCache().list().length;
+ assertCorrectResults({query: query, expectedDocIds: expectedDocIds});
+ const newSize = coll.getPlanCache().list().length;
+ assert.eq(oldSize + increment,
+ newSize,
+ "Expected " + tojson(increment) +
+ " new entries in the cache, but got: " + tojson(coll.getPlanCache().list()));
+}
+
+coll.getPlanCache().clear();
+// Validate queries with a single equality clustered collection scan.
+validatePlanCacheEntries(
+ {increment: 0, query: coll.find({$or: [{_id: 123}, {a: 12}]}), expectedDocIds: []});
+validatePlanCacheEntries(
+ {increment: 0, query: coll.find({$or: [{_id: 6}, {a: 5}]}), expectedDocIds: [5, 6]});
+
+// Validate queries with multiple equality clustered collection scans.
+validatePlanCacheEntries(
+ {increment: 0, query: coll.find({$or: [{_id: 100}, {_id: 123}, {a: 11}]}), expectedDocIds: []});
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: 9}, {_id: 5}, {a: 4}]}),
+ expectedDocIds: [4, 5, 9]
+});
+
+// Validate queries with multiple range clustered collection scans.
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: {$lt: -1}}, {_id: {$gt: 10}}, {a: 12}]}),
+ expectedDocIds: []
+});
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}, {a: 4}]}),
+ expectedDocIds: [0, 4, 9]
+});
+
+// Validate queries with both range and equality clustered collection scans.
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: {$lt: -1}}, {_id: 11}, {a: 12}]}),
+ expectedDocIds: []
+});
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: {$lt: 2}}, {_id: 8}, {a: 4}]}),
+ expectedDocIds: [0, 1, 4, 8]
+});
+
+// Validate that queries with 'max' and 'min' set have the correct results. For clustered
+// collections, the query planner falls back to collection scans for these plans.
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: 123}, {a: 12}]}).max({_id: 4}).hint({_id: 1}),
+ expectedDocIds: []
+});
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: 6}, {a: 5}]}).max({_id: 6}).hint({_id: 1}),
+ expectedDocIds: [5]
+});
+
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: 8}, {a: 5}]}).min({_id: 6}).hint({_id: 1}),
+ expectedDocIds: [8]
+});
+validatePlanCacheEntries({
+ increment: 0,
+ query: coll.find({$or: [{_id: 123}, {a: 12}]}).min({_id: 4}).hint({_id: 1}),
+ expectedDocIds: []
+});
+
+// Validate that queries which only use a collection scan still get cached. We are checking the SBE
+// cache and don't expect it to increment for the classic engine.
+const incrementCache = checkSBEEnabled(db) ? 1 : 0;
+validatePlanCacheEntries({
+ increment: incrementCache,
+ query: coll.find({_id: {$gte: 4}}),
+ expectedDocIds: [4, 5, 6, 7, 8, 9]
+});
+
+validatePlanCacheEntries({
+ increment: incrementCache,
+ query: coll.find({$and: [{_id: {$gte: 4}}, {noIndex: 6}]}),
+ expectedDocIds: [6]
+});
+
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
index 4406843eb8396..ebcbecca06015 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js
@@ -2,10 +2,7 @@
* Tests that a $not-$in-$regex query, which cannot be supported by an index, cannot incorrectly
* hijack the cached plan for an earlier $not-$in query.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For isCollScan and getPlanCacheKeyFromShape.
+import {getPlanCacheKeyFromShape, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js";
const coll = db.plan_cache_not_in_regex;
coll.drop();
@@ -53,5 +50,4 @@ for (let [proj, sort] of [[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1
// Flush the plan cache before the next iteration.
coll.getPlanCache().clear();
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index d293413f0fc8b..430cdb89c7968 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -3,12 +3,9 @@
* oscillates. It achieves this by creating two indexes, A and B, on a collection, and interleaving
* queries which are "ideal" for index A with queries that are "ideal" for index B.
*/
-(function() {
-"use strict";
-
-load('jstests/libs/analyze_plan.js'); // For getPlanStage().
+import {getCachedPlan, getPlanCacheKeyFromShape, getPlanStage} from "jstests/libs/analyze_plan.js";
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
const isSbeEnabled = checkSBEEnabled(db);
@@ -245,4 +242,3 @@ coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
entryAfterRunningSpecialQuery);
}
}
-})();
diff --git a/jstests/noPassthroughWithMongod/plan_selection_no_results.js b/jstests/noPassthroughWithMongod/plan_selection_no_results.js
index 6e9b0f56de61e..f21354285db92 100644
--- a/jstests/noPassthroughWithMongod/plan_selection_no_results.js
+++ b/jstests/noPassthroughWithMongod/plan_selection_no_results.js
@@ -4,9 +4,7 @@
* The plan which is able to most cheaply determine that there are no results should be selected as
* the winner.
*/
-(function() {
-"use strict";
-load("jstests/libs/analyze_plan.js");
+import {getPlanStage} from "jstests/libs/analyze_plan.js";
const coll = db.plan_selection_no_results;
coll.drop();
@@ -39,5 +37,4 @@ assert.eq(ixScan.keyPattern, {y: 1}, explain);
// Check that there's two rejected plans (one IX intersect plan and one plan which scans the
// {x: 1} index).
-assert.eq(explain.queryPlanner.rejectedPlans.length, 2, explain);
-})();
+assert.eq(explain.queryPlanner.rejectedPlans.length, 2, explain);
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/projection_optimizations.js b/jstests/noPassthroughWithMongod/projection_optimizations.js
index 8658d6f226043..831f5a4be64d4 100644
--- a/jstests/noPassthroughWithMongod/projection_optimizations.js
+++ b/jstests/noPassthroughWithMongod/projection_optimizations.js
@@ -1,11 +1,8 @@
/**
* Test projections with $and in cases where optimizations could be performed.
*/
-(function() {
-"use strict";
-
load("jstests/aggregation/extras/utils.js");
-load('jstests/libs/analyze_plan.js');
+import {getWinningPlan, isIndexOnly, isCollscan} from "jstests/libs/analyze_plan.js";
const coll = db.projection_and;
coll.drop();
@@ -25,9 +22,9 @@ let result = runFindWithProjection({
expected: [{a: 1, b: false}]
});
// Query should be optimized and covered.
-assert(isIndexOnly(db, getWinningPlan(result.explain().queryPlanner)));
+const winningPlan = getWinningPlan(result.explain().queryPlanner);
+assert(isIndexOnly(db, winningPlan), winningPlan);
result = runFindWithProjection(
{projection: {a: {$and: ['$a', true, 1]}}, expected: [{_id: 0, a: true}]});
-assert(isCollscan(db, getWinningPlan(result.explain().queryPlanner)));
-})();
+assert(isCollscan(db, getWinningPlan(result.explain().queryPlanner)));
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/query_stats_configuration.js b/jstests/noPassthroughWithMongod/query_stats_configuration.js
new file mode 100644
index 0000000000000..bc5ee4647ce76
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/query_stats_configuration.js
@@ -0,0 +1,29 @@
+/**
+ * Tests that the telemetry store can be resized if it is configured, and cannot be resized if it is
+ * disabled.
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+if (FeatureFlagUtil.isEnabled(db, "QueryStats")) {
+ function testTelemetrySetting(paramName, paramValue) {
+ // The feature flag is enabled - make sure the telemetry store can be configured.
+ const original = assert.commandWorked(db.adminCommand({getParameter: 1, [paramName]: 1}));
+ assert(original.hasOwnProperty(paramName), original);
+ const originalValue = original[paramName];
+ try {
+ assert.doesNotThrow(() => db.adminCommand({setParameter: 1, [paramName]: paramValue}));
+ // Other tests verify that changing the parameter actually affects the behavior.
+ } finally {
+ assert.doesNotThrow(() =>
+ db.adminCommand({setParameter: 1, [paramName]: originalValue}));
+ }
+ }
+ testTelemetrySetting("internalQueryStatsCacheSize", "2MB");
+ testTelemetrySetting("internalQueryStatsRateLimit", 2147483647);
+} else {
+ // The feature flag is disabled - make sure the telemetry store *cannot* be configured.
+ assert.commandFailedWithCode(
+ db.adminCommand({setParameter: 1, internalQueryStatsCacheSize: '2MB'}), 7373500);
+ assert.commandFailedWithCode(
+ db.adminCommand({setParameter: 1, internalQueryStatsRateLimit: 2147483647}), 7506200);
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
index 9804c41cea4e6..19aceb64ddd86 100644
--- a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
+++ b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
@@ -2,12 +2,17 @@
* Tests that randomly generated documents can be queried from timeseries collections in the same
 * manner as a traditional collection.
*/
-(function() {
-"use strict";
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js";
load('jstests/third_party/fast_check/fc-3.1.0.js'); // For fast-check (fc).
-const scalars = [fc.string(), fc.double(), fc.boolean(), fc.date(), fc.constant(null)];
+// TODO SERVER-67506: Re-enable this test when a decision is made about how Bonsai will handle
+// comparison to null. Other semantic difference tickets are also relevant here.
+let scalars = [fc.string(), fc.double(), fc.boolean(), fc.date()];
+if (!checkCascadesOptimizerEnabled(db)) {
+ scalars.push(fc.constant(null));
+}
+
const pathComponents = fc.constant("a", "b");
// Define our grammar for documents.
let documentModel = fc.letrec(
@@ -61,7 +66,7 @@ let testMixedTypeQuerying = () => {
// Query on pathArray w/ {[compare]: val} on test and control.
// Compare the results.
try {
- assert.docEq(
+ assert.sameMembers(
// Isn't timeseries.
db.control.find({[path]: {[compare]: val}}, {_id: 0}).toArray(),
// Is timeseries.
@@ -75,5 +80,4 @@ let testMixedTypeQuerying = () => {
}));
}; // testMixedTypeQuerying
-testMixedTypeQuerying();
-})();
+testMixedTypeQuerying();
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/reconfigwt.js b/jstests/noPassthroughWithMongod/reconfigwt.js
index ed1070b7e84c9..29a668a56f55b 100644
--- a/jstests/noPassthroughWithMongod/reconfigwt.js
+++ b/jstests/noPassthroughWithMongod/reconfigwt.js
@@ -14,7 +14,7 @@ if (ss.storageEngine.name !== "wiredTiger") {
var admin = conn.getDB("admin");
function reconfigure(str) {
- ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str});
+ let ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str});
print("ret: " + tojson(ret));
return ret;
}
diff --git a/jstests/noPassthroughWithMongod/recstore.js b/jstests/noPassthroughWithMongod/recstore.js
index cae767b063790..9b4e0ee1e6a61 100644
--- a/jstests/noPassthroughWithMongod/recstore.js
+++ b/jstests/noPassthroughWithMongod/recstore.js
@@ -3,7 +3,7 @@
// it is probably redundant with other tests but is a convenient starting point
// for testing such things.
-t = db.storetest;
+let t = db.storetest;
t.drop();
diff --git a/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js
index aa4a363a72a91..58370576a14c2 100644
--- a/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js
+++ b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js
@@ -43,4 +43,4 @@ let runTest = function(doc) {
runTest();
runTest({a: 1});
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index ba5fd2207951e..87492ce930341 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -1,9 +1,9 @@
-t = db.jstests_remove9;
+let t = db.jstests_remove9;
t.drop();
-js =
+let js =
"while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
-pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null);
+let pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null);
Random.setRandomSeed();
for (var i = 0; i < 10000; ++i) {
diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js
index 878e25d094059..d093d6551e58b 100644
--- a/jstests/noPassthroughWithMongod/replReads.js
+++ b/jstests/noPassthroughWithMongod/replReads.js
@@ -12,8 +12,8 @@ function testReadLoadBalancing(numReplicas) {
s.getDB("test").foo.insert({a: 123});
- primary = s.rs0.getPrimary();
- secondaries = s.rs0.getSecondaries();
+ let primary = s.rs0.getPrimary();
+ let secondaries = s.rs0.getSecondaries();
function rsStats() {
return s.getDB("admin").runCommand("connPoolStats")["replicaSets"][s.rs0.name];
@@ -51,7 +51,7 @@ function testReadLoadBalancing(numReplicas) {
var connections = [];
for (var i = 0; i < secondaries.length * 10; i++) {
- conn = new Mongo(s._mongos[0].host);
+ let conn = new Mongo(s._mongos[0].host);
conn.setSecondaryOk();
conn.getDB('test').foo.findOne();
connections.push(conn);
@@ -70,7 +70,7 @@ function testReadLoadBalancing(numReplicas) {
db = primary.getDB("test");
printjson(rs.status());
- c = rs.conf();
+ let c = rs.conf();
print("config before: " + tojson(c));
for (i = 0; i < c.members.length; i++) {
if (c.members[i].host == db.runCommand("hello").primary)
@@ -102,7 +102,7 @@ function testReadLoadBalancing(numReplicas) {
secondaries = s.rs0.getSecondaries();
for (var i = 0; i < secondaries.length * 10; i++) {
- conn = new Mongo(s._mongos[0].host);
+ let conn = new Mongo(s._mongos[0].host);
conn.setSecondaryOk();
conn.getDB('test').foo.findOne();
connections.push(conn);
diff --git a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
index 78f998ad4f786..78c238630c8a4 100644
--- a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
+++ b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
@@ -2,10 +2,7 @@
// the pushed down query with SBE.
// TODO: Remove this file when all agg expressions are supported by SBE.
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
// Storing the expression we assume is unsupported as a constant, so we can easily change it when we
// implement $toBool in SBE.
@@ -15,7 +12,7 @@ const kUnsupportedExpression = {
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is not enabled");
- return;
+ quit();
}
const coll = db.jstests_sbe_pushdown;
@@ -58,5 +55,4 @@ assertPushdownQueryExecMode([{$match: {a: 2}}, {$project: {_id: 0, c: {kUnsuppor
// Test query with fully supported expressions are executed with SBE when pushed down.
assertPushdownQueryExecMode(
- [{$match: {$expr: {$eq: ["$b", {$dateFromParts: {year: 2021, month: 4, day: 28}}]}}}], "2");
-}());
+ [{$match: {$expr: {$eq: ["$b", {$dateFromParts: {year: 2021, month: 4, day: 28}}]}}}], "2");
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js b/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js
new file mode 100644
index 0000000000000..2e56872988521
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js
@@ -0,0 +1,69 @@
+/**
+ * Tests the SBE plan cache for COUNT SCAN queries.
+ */
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
+
+const testDb = db.getSiblingDB(jsTestName());
+// This test is specifically verifying the behavior of the SBE plan cache.
+if (!checkSBEEnabled(testDb)) {
+ jsTestLog("Skipping test because SBE is not enabled");
+ quit();
+}
+
+assert.commandWorked(testDb.dropDatabase());
+
+const coll = testDb.coll;
+
+assert.commandWorked(coll.insert([
+ {a: 1},
+ {a: 1, b: 1},
+ {a: null, b: 2},
+ {b: 4},
+ {a: {b: 4}},
+ {a: [], b: 2},
+ {a: [[], 3]},
+ {a: {}},
+]));
+
+function assertCountScan(pipeline) {
+ const explain = coll.explain().aggregate(pipeline);
+ const queryPlan = getWinningPlan(explain.stages[0].$cursor.queryPlanner);
+ const countScan = getPlanStages(queryPlan, "COUNT_SCAN");
+ assert.neq([], countScan, explain);
+}
+
+function runTest({index, query, expectedCount, updatedQuery, updatedCount}) {
+ assert.commandWorked(coll.createIndex(index));
+ coll.getPlanCache().clear();
+ assert.eq(0, coll.getPlanCache().list().length);
+ const oldHits = testDb.serverStatus().metrics.query.planCache.sbe.hits;
+
+ const pipeline = [{$match: query}, {$count: "count"}];
+ assertCountScan(pipeline);
+
+ assert.eq(expectedCount, coll.aggregate(pipeline).toArray()[0].count);
+ assert.eq(expectedCount, coll.aggregate(pipeline).toArray()[0].count);
+ // Verify that the cache has 1 entry and has been hit once.
+ assert.eq(1, coll.getPlanCache().list().length);
+ assert.eq(testDb.serverStatus().metrics.query.planCache.sbe.hits, oldHits + 1);
+ // Run again with a different value to test the parameterization.
+ pipeline[0].$match = updatedQuery;
+ assert.eq(updatedCount, coll.aggregate(pipeline).toArray()[0].count);
+ // The cache does not get a new entry.
+ assert.eq(1, coll.getPlanCache().list().length);
+ // The hits statistic is incremented.
+ assert.eq(testDb.serverStatus().metrics.query.planCache.sbe.hits, oldHits + 2);
+
+ assert.commandWorked(coll.dropIndex(index));
+}
+
+runTest({index: {a: 1}, query: {a: 1}, expectedCount: 2, updatedQuery: {a: 3}, updatedCount: 1});
+// Test the multikey and null case.
+runTest({
+ index: {a: 1, b: 1, _id: 1},
+ query: {a: {$in: [null, []]}, b: 2},
+ expectedCount: 2,
+ updatedQuery: {a: {$in: [null, []]}, b: 4},
+ updatedCount: 1
+});
diff --git a/jstests/noPassthroughWithMongod/sbe_query_eligibility.js b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js
index e229c8269a2e2..4e2d627bf3935 100644
--- a/jstests/noPassthroughWithMongod/sbe_query_eligibility.js
+++ b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js
@@ -1,11 +1,7 @@
/**
 * Test that verifies which query shapes are eligible for SBE.
*/
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
/**
* Utility which asserts that when running the given 'query' over 'collection', explain's reported
@@ -28,7 +24,7 @@ function assertEngineUsed(collection, query, isSBE) {
if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is disabled");
- return;
+ quit();
}
const collName = "sbe_eligiblity";
@@ -216,5 +212,4 @@ const fallbackToClassicCases = [
for (const query of fallbackToClassicCases) {
assertEngineUsed(coll, query, false /* isSBE */);
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/tcmalloc.js b/jstests/noPassthroughWithMongod/tcmalloc.js
index e69850fc8e0d6..81b8aaec18676 100644
--- a/jstests/noPassthroughWithMongod/tcmalloc.js
+++ b/jstests/noPassthroughWithMongod/tcmalloc.js
@@ -20,4 +20,4 @@ if (hasTcSetParameter()) {
assert.commandFailed(db.adminCommand({setParameter: 1, tcmallocReleaseRate: -1.0}));
assert.commandFailed(db.adminCommand({setParameter: 1, tcmallocReleaseRate: "foo"}));
}
-}());
\ No newline at end of file
+}());
diff --git a/jstests/noPassthroughWithMongod/telemetry_configuration.js b/jstests/noPassthroughWithMongod/telemetry_configuration.js
deleted file mode 100644
index 0ae4e8408c34c..0000000000000
--- a/jstests/noPassthroughWithMongod/telemetry_configuration.js
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Tests that the telemetry store can be resized if it is configured, and cannot be resized if it is
- * disabled.
- */
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-
-if (FeatureFlagUtil.isEnabled(db, "Telemetry")) {
- function testTelemetrySetting(paramName, paramValue) {
- // The feature flag is enabled - make sure the telemetry store can be configured.
- const original = assert.commandWorked(db.adminCommand({getParameter: 1, [paramName]: 1}));
- assert(original.hasOwnProperty(paramName), original);
- const originalValue = original[paramName];
- try {
- assert.doesNotThrow(() => db.adminCommand({setParameter: 1, [paramName]: paramValue}));
- // Other tests verify that changing the parameter actually affects the behavior.
- } finally {
- assert.doesNotThrow(() =>
- db.adminCommand({setParameter: 1, [paramName]: originalValue}));
- }
- }
- testTelemetrySetting("internalQueryConfigureTelemetryCacheSize", "2MB");
- testTelemetrySetting("internalQueryConfigureTelemetrySamplingRate", 2147483647);
-} else {
- // The feature flag is disabled - make sure the telemetry store *cannot* be configured.
- assert.commandFailedWithCode(
- db.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: '2MB'}),
- 7373500);
- assert.commandFailedWithCode(
- db.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}),
- 7506200);
-}
-}());
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index 1f4438d5b967f..249cf4082b231 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -3,10 +3,10 @@
// This test requires persistence because it assumes data will survive a restart.
// @tags: [requires_persistence, requires_replication]
-testname = 'temp_namespace_sw';
+let testname = 'temp_namespace_sw';
var conn = MongoRunner.runMongod();
-d = conn.getDB('test');
+let d = conn.getDB('test');
assert.commandWorked(d.runCommand({
applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}]
}));
diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js
index 07121f76770e4..fcadd4640eb15 100644
--- a/jstests/noPassthroughWithMongod/testing_only_commands.js
+++ b/jstests/noPassthroughWithMongod/testing_only_commands.js
@@ -49,7 +49,7 @@ const isBoundedSortEnabled = function(conn) {
TestData.enableTestCommands = false;
var conn = MongoRunner.runMongod({});
-for (i in testOnlyCommands) {
+for (let i in testOnlyCommands) {
assertCmdNotFound(conn.getDB('test'), testOnlyCommands[i]);
}
assert.eq(isBoundedSortEnabled(conn), false);
@@ -59,7 +59,7 @@ MongoRunner.stopMongod(conn);
TestData.enableTestCommands = true;
var conn = MongoRunner.runMongod({});
-for (i in testOnlyCommands) {
+for (let i in testOnlyCommands) {
assertCmdFound(conn.getDB('test'), testOnlyCommands[i]);
}
assert.eq(isBoundedSortEnabled(conn), true);
diff --git a/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js b/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js
index 8adf9ad391cc4..b67e2eb9ae541 100644
--- a/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js
+++ b/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js
@@ -8,10 +8,7 @@
* requires_getmore,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/core/timeseries/libs/timeseries.js");
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
const testDB = db.getSiblingDB("timeseries_system_views_drop");
@@ -38,5 +35,4 @@ TimeseriesTest.run((insert) => {
assert.commandWorked(testDB.createView("myView", coll.getName(), []));
assert(testDB.system.views.drop());
-});
-})();
+});
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index 94738104d73f3..a049ba3af8fec 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -15,7 +15,7 @@ t.drop();
t.runCommand("create", {flags: 0});
var now = (new Date()).getTime();
-for (i = 0; i < 24; i++) {
+for (let i = 0; i < 24; i++) {
var past = new Date(now - (3600 * 1000 * i));
t.insert({x: past, y: past, z: past});
}
diff --git a/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js b/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js
index 18ce357341fd1..7f25bad57a482 100644
--- a/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js
+++ b/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js
@@ -19,4 +19,4 @@ assert.commandWorked(db.createCollection(cappedColl.getName(), {capped: true, si
assert.commandWorked(cappedColl.createIndex({foo: 1}));
assert.commandFailedWithCode(cappedColl.createIndex({bar: 1}, {expireAfterSeconds: 10}),
ErrorCodes.CannotCreateIndex);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 8d9121fd8a241..331fe09de5a5c 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -30,9 +30,9 @@ primarycol.drop();
primarydb.createCollection(primarycol.getName());
// create new collection. insert 24 docs, aged at one-hour intervals
-now = (new Date()).getTime();
+let now = (new Date()).getTime();
var bulk = primarycol.initializeUnorderedBulkOp();
-for (i = 0; i < 24; i++) {
+for (let i = 0; i < 24; i++) {
bulk.insert({x: new Date(now - (3600 * 1000 * i))});
}
assert.commandWorked(bulk.execute());
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 23bb9367452b3..79fcaf4befaed 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -22,7 +22,7 @@ var primeSystemReplset = function() {
var restartWithConfig = function() {
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
- testDB = conn.getDB("test");
+ let testDB = conn.getDB("test");
var n = 100;
for (var i = 0; i < n; i++) {
testDB.foo.insert({x: new Date()});
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index 61a97a8ce00d3..93c7df8ad2e41 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -14,7 +14,7 @@ var s = new ShardingTest({shards: 2, mongos: 1});
var dbname = "testDB";
var coll = "ttl_sharded";
var ns = dbname + "." + coll;
-t = s.getDB(dbname).getCollection(coll);
+let t = s.getDB(dbname).getCollection(coll);
// enable sharding of the collection. Only 1 chunk initially
s.adminCommand({enablesharding: dbname});
diff --git a/jstests/noPassthroughWithMongod/validate_bson_types.js b/jstests/noPassthroughWithMongod/validate_bson_types.js
index ffcdd80306d59..82894b66ea04d 100644
--- a/jstests/noPassthroughWithMongod/validate_bson_types.js
+++ b/jstests/noPassthroughWithMongod/validate_bson_types.js
@@ -30,4 +30,4 @@ assert.commandWorked(coll.insert({s: MinKey()}));
assert.commandWorked(coll.insert({t: MaxKey()})); // MaxKey
assert.commandWorked(coll.validate({checkBSONConformance: true}));
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js
index 3ef6a1d79e454..e01b087357be9 100644
--- a/jstests/noPassthroughWithMongod/validate_command.js
+++ b/jstests/noPassthroughWithMongod/validate_command.js
@@ -20,7 +20,7 @@ function testValidate(output) {
// Test to confirm that validate is working as expected.
// SETUP DATA
-t = db.jstests_validate;
+let t = db.jstests_validate;
t.drop();
for (var i = 0; i < count; i++) {
diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_count.js b/jstests/noPassthroughWithMongod/validate_timeseries_count.js
index ed965ac9e9d56..9882ec5879912 100644
--- a/jstests/noPassthroughWithMongod/validate_timeseries_count.js
+++ b/jstests/noPassthroughWithMongod/validate_timeseries_count.js
@@ -7,8 +7,8 @@
* ]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
let testCount = 0;
const collNamePrefix = "validate_timeseries_count";
const bucketNamePrefix = "system.buckets.validate_timeseries_count";
@@ -42,24 +42,28 @@ assert.eq(res.nNonCompliantDocuments, 0);
assert.eq(res.warnings.length, 0);
// Manually changes the control.count of a version-2 (closed) bucket, expects warnings.
-jsTestLog("Manually changing the 'control.count' of a version-2 bucket.");
-testCount += 1;
-collName = collNamePrefix + testCount;
-bucketName = bucketNamePrefix + testCount;
-db.getCollection(collName).drop();
-assert.commandWorked(db.createCollection(
- collName, {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}}));
-coll = db.getCollection(collName);
-bucket = db.getCollection(bucketName);
-coll.insertMany([...Array(1002).keys()].map(i => ({
- "metadata": {"sensorId": 2, "type": "temperature"},
- "timestamp": ISODate(),
- "temp": i
- })),
- {ordered: false});
-bucket.updateOne({"meta.sensorId": 2, 'control.version': 2}, {"$set": {"control.count": 10}});
-res = bucket.validate();
-assert(res.valid, tojson(res));
-assert.eq(res.nNonCompliantDocuments, 1);
-assert.eq(res.warnings.length, 1);
-})();
+if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ // TODO SERVER-77454: Investigate re-enabling this.
+ jsTestLog("Manually changing the 'control.count' of a version-2 bucket.");
+ testCount += 1;
+ collName = collNamePrefix + testCount;
+ bucketName = bucketNamePrefix + testCount;
+ db.getCollection(collName).drop();
+ assert.commandWorked(db.createCollection(
+ collName,
+ {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}}));
+ coll = db.getCollection(collName);
+ bucket = db.getCollection(bucketName);
+ coll.insertMany(
+ [...Array(1002).keys()].map(i => ({
+ "metadata": {"sensorId": 2, "type": "temperature"},
+ "timestamp": ISODate(),
+ "temp": i
+ })),
+ {ordered: false});
+ bucket.updateOne({"meta.sensorId": 2, 'control.version': 2}, {"$set": {"control.count": 10}});
+ res = bucket.validate();
+ assert(res.valid, tojson(res));
+ assert.eq(res.nNonCompliantDocuments, 1);
+ assert.eq(res.warnings.length, 1);
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js b/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js
index 1136b8dcc5f9c..d53f28218e668 100644
--- a/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js
+++ b/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js
@@ -114,4 +114,4 @@ res = assert.commandWorked(coll.validate());
assert(res.valid, tojson(res));
assert(res.warnings.length == 1, tojson(res));
assert(res.nNonCompliantDocuments == 1, tojson(res));
-})();
\ No newline at end of file
+})();
diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js b/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js
index d2587e5ea49cb..c7cf0dd85b62b 100644
--- a/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js
+++ b/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js
@@ -5,8 +5,7 @@
* @tags: [requires_fcv_62]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
const collPrefix = "validate_timeseries_minmax";
const bucketPrefix = "system.buckets.validate_timeseries_minmax";
@@ -257,35 +256,38 @@ assert(res.warnings.length == 1, tojson(res));
assert(res.nNonCompliantDocuments == 1, tojson(res));
// Tests collections with 'control.version' : 2.
-jsTestLog("Running validate on a version 2 bucket with incorrect 'max' object field.");
-setUpCollection(lotsOfData);
-coll = db.getCollection(collName);
-bucket = db.getCollection(bucketName);
-bucket.updateOne({"meta.sensorId": 2, "control.version": 2}, {"$set": {"control.max.temp": 800}});
-res = bucket.validate();
-assert(res.valid, tojson(res));
-assert.eq(res.nNonCompliantDocuments, 1);
-assert.eq(res.warnings.length, 1);
+if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ // TODO SERVER-77454: Investigate re-enabling this.
+ jsTestLog("Running validate on a version 2 bucket with incorrect 'max' object field.");
+ setUpCollection(lotsOfData);
+ coll = db.getCollection(collName);
+ bucket = db.getCollection(bucketName);
+ bucket.updateOne({"meta.sensorId": 2, "control.version": 2},
+ {"$set": {"control.max.temp": 800}});
+ res = bucket.validate();
+ assert(res.valid, tojson(res));
+ assert.eq(res.nNonCompliantDocuments, 1);
+ assert.eq(res.warnings.length, 1);
-// "Checks no errors are thrown with a valid closed bucket."
-jsTestLog(
- "Running validate on a version 2 bucket with everything correct, checking that no warnings are found.");
-setUpCollection(lotsOfData);
-coll = db.getCollection(collName);
-bucket = db.getCollection(bucketName);
-res = bucket.validate();
-assert(res.valid, tojson(res));
-assert.eq(res.nNonCompliantDocuments, 0);
-assert.eq(res.warnings.length, 0);
+ // "Checks no errors are thrown with a valid closed bucket."
+ jsTestLog(
+ "Running validate on a version 2 bucket with everything correct, checking that no warnings are found.");
+ setUpCollection(lotsOfData);
+ coll = db.getCollection(collName);
+ bucket = db.getCollection(bucketName);
+ res = bucket.validate();
+ assert(res.valid, tojson(res));
+ assert.eq(res.nNonCompliantDocuments, 0);
+ assert.eq(res.warnings.length, 0);
-// "Checks no errors are thrown with a valid closed bucket with skipped data fields."
-jsTestLog(
- "Running validate on a correct version 2 bucket with skipped data fields, checking that no warnings are found.");
-setUpCollection(skipFieldData);
-coll = db.getCollection(collName);
-bucket = db.getCollection(bucketName);
-res = bucket.validate();
-assert(res.valid, tojson(res));
-assert.eq(res.nNonCompliantDocuments, 0);
-assert.eq(res.warnings.length, 0);
-})();
\ No newline at end of file
+ // "Checks no errors are thrown with a valid closed bucket with skipped data fields."
+ jsTestLog(
+ "Running validate on a correct version 2 bucket with skipped data fields, checking that no warnings are found.");
+ setUpCollection(skipFieldData);
+ coll = db.getCollection(collName);
+ bucket = db.getCollection(bucketName);
+ res = bucket.validate();
+ assert(res.valid, tojson(res));
+ assert.eq(res.nNonCompliantDocuments, 0);
+ assert.eq(res.warnings.length, 0);
+}
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_version.js b/jstests/noPassthroughWithMongod/validate_timeseries_version.js
index 2b4d9af7d03cf..56e30f052a47e 100644
--- a/jstests/noPassthroughWithMongod/validate_timeseries_version.js
+++ b/jstests/noPassthroughWithMongod/validate_timeseries_version.js
@@ -7,8 +7,8 @@
* ]
*/
-(function() {
-"use strict";
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
let testCount = 0;
const collNamePrefix = "validate_timeseries_version";
const bucketNamePrefix = "system.buckets.validate_timeseries_version";
@@ -66,27 +66,32 @@ assert.eq(res.warnings.length, 1);
// Inserts enough documents to close a bucket and then manually changes the version to 1.
// Expects warnings from validation.
-jsTestLog(
- "Changing the 'control.version' of a closed bucket from 2 to 1, and checking for warnings from validation.");
-testCount += 1;
-collName = collNamePrefix + testCount;
-bucketName = bucketNamePrefix + testCount;
-db.getCollection(collName).drop();
-assert.commandWorked(db.createCollection(
- collName, {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}}));
-coll = db.getCollection(collName);
-bucket = db.getCollection(bucketName);
-coll.insertMany([...Array(1200).keys()].map(i => ({
- "metadata": {"sensorId": 3, "type": "temperature"},
- "timestamp": ISODate(),
- "temp": i
- })),
- {ordered: false});
-bucket.updateOne({"meta.sensorId": 3, "control.version": 2}, {"$set": {"control.version": 1}});
-res = bucket.validate();
-assert(res.valid, tojson(res));
-assert.eq(res.nNonCompliantDocuments, 1);
-assert.eq(res.warnings.length, 1);
+if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ // TODO SERVER-77454: Investigate re-enabling this.
+ jsTestLog(
+ "Changing the 'control.version' of a closed bucket from 2 to 1, and checking for warnings from validation.");
+ testCount += 1;
+ collName = collNamePrefix + testCount;
+ bucketName = bucketNamePrefix + testCount;
+ db.getCollection(collName).drop();
+ assert.commandWorked(db.createCollection(
+ collName,
+ {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}}));
+ coll = db.getCollection(collName);
+ bucket = db.getCollection(bucketName);
+ coll.insertMany(
+ [...Array(1200).keys()].map(i => ({
+ "metadata": {"sensorId": 3, "type": "temperature"},
+ "timestamp": ISODate(),
+ "temp": i
+ })),
+ {ordered: false});
+ bucket.updateOne({"meta.sensorId": 3, "control.version": 2}, {"$set": {"control.version": 1}});
+ res = bucket.validate();
+ assert(res.valid, tojson(res));
+ assert.eq(res.nNonCompliantDocuments, 1);
+ assert.eq(res.warnings.length, 1);
+}
// Returns warnings on a bucket with an unsupported version.
jsTestLog("Changing 'control.version' to an unsupported version and checking for warnings.");
@@ -104,7 +109,13 @@ coll.insertMany([...Array(1100).keys()].map(i => ({
"temp": i
})),
{ordered: false});
-bucket.updateOne({"meta.sensorId": 4, "control.version": 2}, {"$set": {"control.version": 500}});
+if (FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) {
+ bucket.updateOne({"meta.sensorId": 4, "control.version": 1},
+ {"$set": {"control.version": 500}});
+} else {
+ bucket.updateOne({"meta.sensorId": 4, "control.version": 2},
+ {"$set": {"control.version": 500}});
+}
res = bucket.validate();
assert(res.valid, tojson(res));
assert.eq(res.nNonCompliantDocuments, 1);
@@ -118,5 +129,4 @@ bucket.updateOne({"meta.sensorId": 4, "control.version": 1}, {"$set": {"control.
res = bucket.validate();
assert(res.valid, tojson(res));
assert.eq(res.nNonCompliantDocuments, 2);
-assert.eq(res.warnings.length, 1);
-})();
\ No newline at end of file
+assert.eq(res.warnings.length, 1);
\ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/views_invalid.js b/jstests/noPassthroughWithMongod/views_invalid.js
index 465c28822c490..4d56c92ad7816 100644
--- a/jstests/noPassthroughWithMongod/views_invalid.js
+++ b/jstests/noPassthroughWithMongod/views_invalid.js
@@ -23,7 +23,7 @@ assert.commandWorked(invalidDB.adminCommand(
{applyOps: [{op: "i", ns: dbname + ".system.views", o: {_id: "invalid", pipeline: 3.0}}]}));
// Make sure we logged an error message about the invalid view.
-assert(checkLog.checkContainsOnceJson(invalidDB, 7267300));
+assert(checkLog.checkContainsOnceJson(invalidDB, 20326));
// Check that view-related commands fail with an invalid view catalog, but other commands on
// existing collections still succeed.
diff --git a/jstests/ocsp/lib/mock_ocsp.js b/jstests/ocsp/lib/mock_ocsp.js
index faae43c3bdfc5..3246d1732bb37 100644
--- a/jstests/ocsp/lib/mock_ocsp.js
+++ b/jstests/ocsp/lib/mock_ocsp.js
@@ -145,4 +145,4 @@ class MockOCSPServer {
print("Mock OCSP Server stop complete");
}
-}
\ No newline at end of file
+}
diff --git a/jstests/ocsp/lib/ocsp_helpers.js b/jstests/ocsp/lib/ocsp_helpers.js
index 90ec04d35bae2..130892f04f6d1 100644
--- a/jstests/ocsp/lib/ocsp_helpers.js
+++ b/jstests/ocsp/lib/ocsp_helpers.js
@@ -103,4 +103,4 @@ var supportsStapling = function() {
return false;
}
return true;
-};
\ No newline at end of file
+};
diff --git a/jstests/ocsp/ocsp_basic_ca_responder.js b/jstests/ocsp/ocsp_basic_ca_responder.js
index e63ca98d499e7..49962ebfde643 100644
--- a/jstests/ocsp/ocsp_basic_ca_responder.js
+++ b/jstests/ocsp/ocsp_basic_ca_responder.js
@@ -56,4 +56,4 @@ test(OCSP_SERVER_SIGNED_BY_INTERMEDIATE_CA_PEM,
OCSP_INTERMEDIATE_CA_WITH_ROOT_PEM,
OCSP_INTERMEDIATE_RESPONDER);
test(OCSP_SERVER_AND_INTERMEDIATE_APPENDED_PEM, OCSP_CA_PEM, OCSP_INTERMEDIATE_RESPONDER);
-}());
\ No newline at end of file
+}());
diff --git a/jstests/ocsp/ocsp_client_verification_logging.js b/jstests/ocsp/ocsp_client_verification_logging.js
index cd883380e7e56..6e6423300b4a8 100644
--- a/jstests/ocsp/ocsp_client_verification_logging.js
+++ b/jstests/ocsp/ocsp_client_verification_logging.js
@@ -123,4 +123,4 @@ let runTest = (options) => {
runTest({connectionHealthLoggingOn: true});
runTest({connectionHealthLoggingOn: false});
runTest({ocspFaultType: FAULT_REVOKED, connectionHealthLoggingOn: true});
-}());
\ No newline at end of file
+}());
diff --git a/jstests/ocsp/ocsp_sharding_basic.js b/jstests/ocsp/ocsp_sharding_basic.js
index f8b6ed2523e6c..a196b200a5782 100644
--- a/jstests/ocsp/ocsp_sharding_basic.js
+++ b/jstests/ocsp/ocsp_sharding_basic.js
@@ -86,4 +86,4 @@ st.restartMongos(0);
mock_ocsp.stop();
st.stop();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/parallel/allops.js b/jstests/parallel/allops.js
index b0d6e7188a375..d15fd09b0c89f 100644
--- a/jstests/parallel/allops.js
+++ b/jstests/parallel/allops.js
@@ -1,14 +1,14 @@
// test all operations in parallel
load('jstests/libs/parallelTester.js');
-f = db.jstests_parallel_allops;
+let f = db.jstests_parallel_allops;
f.drop();
Random.setRandomSeed();
-t = new ParallelTester();
+let t = new ParallelTester();
-for (id = 0; id < 10; ++id) {
+for (var id = 0; id < 10; ++id) {
var g = new EventGenerator(id, "jstests_parallel_allops", Random.randInt(20));
for (var j = 0; j < 1000; ++j) {
var op = Random.randInt(3);
diff --git a/jstests/parallel/checkMultiThread.js b/jstests/parallel/checkMultiThread.js
index 3f0bbe8d6e3b5..5dea82ac040ac 100644
--- a/jstests/parallel/checkMultiThread.js
+++ b/jstests/parallel/checkMultiThread.js
@@ -6,8 +6,8 @@ var func = function() {
db.runCommand({sleep: 1, seconds: 10000});
return new Date();
};
-a = new Thread(func);
-b = new Thread(func);
+let a = new Thread(func);
+let b = new Thread(func);
a.start();
b.start();
a.join();
diff --git a/jstests/parallel/del.js b/jstests/parallel/del.js
index 1a2c74db4a143..1b8e23990cb45 100644
--- a/jstests/parallel/del.js
+++ b/jstests/parallel/del.js
@@ -1,10 +1,10 @@
load('jstests/libs/parallelTester.js');
-N = 1000;
-HOST = db.getMongo().host;
+const N = 1000;
+const HOST = db.getMongo().host;
-a = db.getSiblingDB("fooa");
-b = db.getSiblingDB("foob");
+const a = db.getSiblingDB("fooa");
+const b = db.getSiblingDB("foob");
a.dropDatabase();
b.dropDatabase();
@@ -80,17 +80,17 @@ function del2(dbname, host, max, kCursorKilledErrorCodes) {
}
}
-all = [];
+const all = [];
all.push(fork(del1, "a", HOST, N, kCursorKilledErrorCodes));
all.push(fork(del2, "a", HOST, N, kCursorKilledErrorCodes));
all.push(fork(del1, "b", HOST, N, kCursorKilledErrorCodes));
all.push(fork(del2, "b", HOST, N, kCursorKilledErrorCodes));
-for (i = 0; i < all.length; i++)
+for (let i = 0; i < all.length; i++)
all[i].start();
-for (i = 0; i < 10; i++) {
+for (let i = 0; i < 10; i++) {
sleep(2000);
print("dropping");
a.dropDatabase();
@@ -100,6 +100,6 @@ for (i = 0; i < 10; i++) {
a.del_parallel.save({done: 1});
b.del_parallel.save({done: 1});
-for (i = 0; i < all.length; i++) {
+for (let i = 0; i < all.length; i++) {
assert.commandWorked(all[i].returnData());
}
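The parallel-suite hunks in this patch replace implicit globals with let/const, including the loop counters. A short sketch of the difference in the shell, assuming an illustrative array `all` of Thread-like objects with a start() method:

    // Implicit global: `i` leaks into the shell's global scope and is shared by
    // any later code that happens to read or write `i`.
    for (i = 0; i < all.length; i++)
        all[i].start();

    // Block-scoped: each iteration gets its own `i`, and nothing escapes the loop.
    for (let i = 0; i < all.length; i++)
        all[i].start();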
diff --git a/jstests/parallel/insert.js b/jstests/parallel/insert.js
index d28eb89e2205b..d7fd72d4d6265 100644
--- a/jstests/parallel/insert.js
+++ b/jstests/parallel/insert.js
@@ -1,17 +1,17 @@
// perform inserts in parallel from several clients
load('jstests/libs/parallelTester.js');
-f = db.jstests_parallel_insert;
+let f = db.jstests_parallel_insert;
f.drop();
f.createIndex({who: 1});
Random.setRandomSeed();
-t = new ParallelTester();
+let t = new ParallelTester();
-for (id = 0; id < 10; ++id) {
+for (let id = 0; id < 10; ++id) {
var g = new EventGenerator(id, "jstests_parallel_insert", Random.randInt(20));
- for (j = 0; j < 1000; ++j) {
+ for (let j = 0; j < 1000; ++j) {
if (j % 50 == 0) {
g.addCheckCount(j, {who: id});
}
diff --git a/jstests/parallel/shellfork.js b/jstests/parallel/shellfork.js
index b7621279d08db..4646b8f7414f7 100644
--- a/jstests/parallel/shellfork.js
+++ b/jstests/parallel/shellfork.js
@@ -1,16 +1,16 @@
load('jstests/libs/parallelTester.js');
-a = fork(function(a, b) {
+let a = fork(function(a, b) {
return a / b;
}, 10, 2);
a.start();
-b = fork(function(a, b, c) {
+let b = fork(function(a, b, c) {
return a + b + c;
}, 18, " is a ", "multiple of 3");
-makeFunny = function(text) {
+let makeFunny = function(text) {
return text + " ha ha!";
};
-c = fork(makeFunny, "paisley");
+let c = fork(makeFunny, "paisley");
c.start();
b.start();
b.join();
@@ -18,7 +18,7 @@ assert.eq(5, a.returnData());
assert.eq("18 is a multiple of 3", b.returnData());
assert.eq("paisley ha ha!", c.returnData());
-z = fork(function(a) {
+let z = fork(function(a) {
load('jstests/libs/parallelTester.js');
var y = fork(function(a) {
return a + 1;
@@ -29,7 +29,7 @@ z = fork(function(a) {
z.start();
assert.eq(7, z.returnData());
-t = 1;
+let t = 1;
z = new Thread(function() {
assert(typeof (t) == "undefined", "t not undefined");
t = 5;
diff --git a/jstests/parallel/update_serializability1.js b/jstests/parallel/update_serializability1.js
index e57b51ae72154..5d91a28d084c0 100644
--- a/jstests/parallel/update_serializability1.js
+++ b/jstests/parallel/update_serializability1.js
@@ -1,10 +1,10 @@
-t = db.update_serializability1;
+let t = db.update_serializability1;
t.drop();
-N = 100000;
+let N = 100000;
-bulk = t.initializeUnorderedBulkOp();
+let bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < N; i++) {
bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1});
}
@@ -13,10 +13,11 @@ bulk.execute();
t.createIndex({a: 1});
t.createIndex({b: 1});
-s1 = startParallelShell("db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " +
- (N + 1) + ", x : 2 } }, false, true );");
-s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
- " } }, { $set : { a : -1, y : 2 } }, false, true );");
+let s1 = startParallelShell(
+ "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " + (N + 1) +
+ ", x : 2 } }, false, true );");
+let s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
+ " } }, { $set : { a : -1, y : 2 } }, false, true );");
s1();
s2();
diff --git a/jstests/query_golden/array_index.js b/jstests/query_golden/array_index.js
index 0b3098b39254a..6177427c104c1 100644
--- a/jstests/query_golden/array_index.js
+++ b/jstests/query_golden/array_index.js
@@ -7,10 +7,8 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton.
+import {show} from "jstests/libs/golden_test.js";
+import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js";
db.setLogLevel(4, "query");
@@ -38,4 +36,3 @@ function run(pipeline) {
run([{$match: {a: 2}}, {$unset: '_id'}]);
run([{$match: {a: {$lt: 2}}}, {$unset: '_id'}]);
-})();
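The query_golden hunks here and below all follow the same migration: the (function() { ... })() wrapper and load() calls are replaced by top-level ES module imports. A minimal before/after sketch, with a hypothetical helper path and function used purely for illustration:

    // Before: classic shell test wrapped in an IIFE, pulling helpers in via load().
    (function() {
    "use strict";
    load("jstests/libs/some_helpers.js");  // hypothetical helper path
    doSomething();
    })();

    // After: ES module test; the helper is imported and the wrapper is dropped.
    import {doSomething} from "jstests/libs/some_helpers.js";  // hypothetical helper path
    doSomething();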
diff --git a/jstests/query_golden/ce_accuracy.js b/jstests/query_golden/ce_accuracy.js
index 4d2d5261940bd..fd354aa70df97 100644
--- a/jstests/query_golden/ce_accuracy.js
+++ b/jstests/query_golden/ce_accuracy.js
@@ -5,10 +5,9 @@
* ]
*/
-(function() {
-
-load("jstests/query_golden/libs/ce_data.js");
-load("jstests/query_golden/libs/run_queries_ce.js");
+import {runHistogramsTest} from "jstests/libs/ce_stats_utils.js";
+import {getCEDocs, getCEDocs1} from "jstests/query_golden/libs/ce_data.js";
+import {runCETestForCollection} from "jstests/query_golden/libs/run_queries_ce.js";
runHistogramsTest(function() {
const coll = db.ce_data_20;
@@ -38,4 +37,3 @@ runHistogramsTest(function() {
const ceDebugFlag = false;
runCETestForCollection(db, collMeta, 4, ceDebugFlag);
});
-})();
diff --git a/jstests/query_golden/ce_mixed.js b/jstests/query_golden/ce_mixed.js
index d4807b9d9cbec..7f852cadb0e53 100644
--- a/jstests/query_golden/ce_mixed.js
+++ b/jstests/query_golden/ce_mixed.js
@@ -1,12 +1,17 @@
/**
- * A test for conjunctive predicates using a semi-realistic collection/queries.
+ * A test for conjunctive and disjunctive predicates using a semi-realistic collection/queries.
* @tags: [
* requires_cqf,
* ]
*/
-(function() {
-load("jstests/libs/ce_stats_utils.js"); // For 'getRootCE', 'createHistogram'.
+import {
+ createHistogram,
+ getRootCE,
+ runHistogramsTest,
+ summarizeExplainForCE
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE} from "jstests/libs/optimizer_utils.js";
const collCard = 300;
const numberBuckets = 5;
@@ -122,6 +127,24 @@ runHistogramsTest(function() {
testPredicate({likesPizza: false, name: {$lte: "Bob Bennet"}});
testPredicate({favPizzaToppings: "mushrooms", name: {$lte: "Bob Bennet"}});
+ // Test disjunctions of predicates all using histograms.
+ testPredicate({$or: [{likesPizza: true}, {date: {$lt: new ISODate("1955-01-01T00:00:00")}}]});
+ testPredicate({
+ $or: [{favPizzaToppings: "mushrooms"}, {name: {$lte: "Bob Bennet", $gte: "Alice Smith"}}]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{likesPizza: false}, {name: {$lte: "Bob Bennet"}}]},
+ {$and: [{likesPizza: true}, {name: {$gte: "Tom Watson"}}]}
+ ]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{likesPizza: false}, {name: {$lte: "Bob Bennet"}}]},
+ {date: {$lte: "1960-01-01T00:00:00"}}
+ ]
+ });
+
// Test conjunctions of predicates such that some use histograms and others use heuristics.
testPredicate({lastPizzaShopVisited: "Zizzi", likesPizza: true});
testPredicate({lastPizzaShopVisited: "Zizzi", likesPizza: false});
@@ -139,5 +162,42 @@ runHistogramsTest(function() {
favPizzaToppings: "mushrooms",
likesPizza: true
});
+
+ // Test disjunctions of predicates such that some use histograms and others use heuristics.
+ testPredicate({$or: [{lastPizzaShopVisited: "Zizzi"}, {likesPizza: true}]});
+ testPredicate({
+ $or: [
+ {lastPizzaShopVisited: "Zizzi"},
+ {
+ date: {
+ $gt: new ISODate("1950-01-01T00:00:00"),
+ $lt: new ISODate("1960-01-01T00:00:00")
+ }
+ }
+ ]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]},
+ {$and: [{favPizzaToppings: "mushrooms"}, {likesPizza: true}]}
+ ]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]},
+ {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$gte: "Kate Knight"}}]}
+ ]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]},
+ {favPizzaToppings: "mushrooms"}
+ ]
+ });
+ testPredicate({
+ $or: [
+ {$and: [{favPizzaToppings: "mushrooms"}, {name: {$lte: "John Watson"}}]},
+ {lastPizzaShopVisited: "Zizzi"}
+ ]
+ });
});
-})();
diff --git a/jstests/query_golden/ce_sampled_histogram.js b/jstests/query_golden/ce_sampled_histogram.js
index 56e878018e2eb..1a0d1d467a938 100644
--- a/jstests/query_golden/ce_sampled_histogram.js
+++ b/jstests/query_golden/ce_sampled_histogram.js
@@ -5,6 +5,16 @@
* requires_cqf,
* ]
*/
+import {
+ createHistogram,
+ getRootCE,
+ runHistogramsTest,
+ summarizeExplainForCE
+} from "jstests/libs/ce_stats_utils.js";
+import {forceCE, round2} from "jstests/libs/optimizer_utils.js";
+import {computeStrategyErrors} from "jstests/query_golden/libs/compute_errors.js";
+
+load("jstests/libs/load_ce_test_data.js"); // For 'loadJSONDataset'.
/**
* Returns a 2-element array containing the number of documents returned by the 'predicate' and
@@ -47,11 +57,6 @@ function testMatchPredicate(baseColl, sampleColl, predicate, collSize, totSample
print(`Sample error: ${tojson(sampleErr)}`);
}
-(function() {
-load("jstests/libs/load_ce_test_data.js"); // For 'loadJSONDataset'.
-load("jstests/libs/ce_stats_utils.js"); // For 'getRootCE', 'createHistogram', runHistogramsTest
-load("jstests/query_golden/libs/compute_errors.js"); // For 'computeStrategyErrors'.
-
Random.setRandomSeed(6345);
const collData = 'ce_accuracy_test';
@@ -162,4 +167,3 @@ runHistogramsTest(function testSampleHistogram() {
print(`Average base error: ${tojson(avgBaseErr)}\n`);
print(`Average sample error: ${tojson(avgSampleErr)}`);
});
-})();
diff --git a/jstests/query_golden/elemMatch.js b/jstests/query_golden/elemMatch.js
index bb6fb52625718..47f2b38bc86f7 100644
--- a/jstests/query_golden/elemMatch.js
+++ b/jstests/query_golden/elemMatch.js
@@ -1,5 +1,4 @@
-(function() {
-"use strict";
+import {show} from "jstests/libs/golden_test.js";
const coll = db.cqf_elemMatch;
coll.drop();
@@ -61,4 +60,3 @@ runPipeline(pipeline);
pipeline = [{$match: {a: {$elemMatch: {$elemMatch: {b: {$elemMatch: {$gt: 5}}}}}}}];
runPipeline(pipeline);
-}());
diff --git a/jstests/query_golden/eq.js b/jstests/query_golden/eq.js
index 5847088a1b227..c595b58b74773 100644
--- a/jstests/query_golden/eq.js
+++ b/jstests/query_golden/eq.js
@@ -2,7 +2,8 @@
* Tests $eq against a variety of BSON types and shapes.
*/
-load('jstests/query_golden/libs/example_data.js');
+import {show} from "jstests/libs/golden_test.js";
+import {leafs, smallDocs} from "jstests/query_golden/libs/example_data.js";
const docs = smallDocs();
diff --git a/jstests/query_golden/example.js b/jstests/query_golden/example.js
index 29ae77f34e9c8..c7ae0b5651bd1 100644
--- a/jstests/query_golden/example.js
+++ b/jstests/query_golden/example.js
@@ -1,6 +1,7 @@
/**
* Example query-correctness test using the golden-data framework.
*/
+import {show} from "jstests/libs/golden_test.js";
const coll = db.query_golden_example;
coll.drop();
diff --git a/jstests/query_golden/exclusion_projection.js b/jstests/query_golden/exclusion_projection.js
index 78e3155650f23..63a723ff9d990 100644
--- a/jstests/query_golden/exclusion_projection.js
+++ b/jstests/query_golden/exclusion_projection.js
@@ -3,10 +3,11 @@
* jstests/cqf/projection.js; both tests will exist pending a decision about the future of golden
* jstesting for CQF.
*/
-
-(function() {
-"use strict";
-load("jstests/query_golden/libs/projection_helpers.js");
+import {
+ getIdProjectionDocs,
+ getProjectionDocs,
+ runProjectionsAgainstColl
+} from "jstests/query_golden/libs/projection_helpers.js";
const coll = db.cqf_exclusion_project;
const exclusionProjSpecs = [
@@ -34,4 +35,3 @@ const idExclusionProjectSpecs = [
{"_id.a.b": 0},
];
runProjectionsAgainstColl(coll, getIdProjectionDocs(), [] /*no indexes*/, idExclusionProjectSpecs);
-}());
diff --git a/jstests/query_golden/expected_output/array_index b/jstests/query_golden/expected_output/array_index
index d11fcf001b018..5be96c8b6e23e 100644
--- a/jstests/query_golden/expected_output/array_index
+++ b/jstests/query_golden/expected_output/array_index
@@ -9,7 +9,8 @@
{ "a" : [ 2 ] }
{ "a" : [ 2, 3, 4 ] }
nReturned: 4
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -34,6 +35,7 @@ Plan skeleton: {
}
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$lt" : 2 } } }, { "$unset" : "_id" } ]
[jsTest] ----
@@ -41,7 +43,8 @@ Plan skeleton: {
{ "a" : [ 1, 2, 3, 4 ] }
{ "a" : [ 1, 3 ] }
nReturned: 2
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -67,4 +70,4 @@ Plan skeleton: {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/jstests/query_golden/expected_output/ce_accuracy b/jstests/query_golden/expected_output/ce_accuracy
index 4e0e7bab41b4b..8f4ae7bf079b5 100644
--- a/jstests/query_golden/expected_output/ce_accuracy
+++ b/jstests/query_golden/expected_output/ce_accuracy
@@ -14,6 +14,7 @@ Collection count: 20
Running CE accuracy test for collection ce_data_20 of 20 documents.
Begin query generation
+
[jsTest] ----
[jsTest] Sample positions: [ 2, 7, 12, 17 ]
[jsTest]
@@ -100,10 +101,14 @@ Running query batch [0 - 37) with fields [ "a", "b", "c_int", "mixed" ]
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -112,10 +117,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -124,10 +133,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 6.6 QError: 1.06, RelError: -0.06, SelError: -2%
-heuristicIdx: 6.6 QError: 1.06, RelError: -0.06, SelError: -2%
-histogram: 7 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 7 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.06, RelError: -0.06, SelError: -2%
+heuristicIdx: 6.6
+QError: 1.06, RelError: -0.06, SelError: -2%
+histogram: 7
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 7
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -136,10 +149,14 @@ histogramIdx: 7 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-histogram: 8 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+heuristicIdx: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+histogram: 8
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 8
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -148,10 +165,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 12
Cardinality estimates:
-heuristic: 9 QError: 1.33, RelError: -0.25, SelError: -15%
-heuristicIdx: 9 QError: 1.33, RelError: -0.25, SelError: -15%
-histogram: 12 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 12 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1.33, RelError: -0.25, SelError: -15%
+heuristicIdx: 9
+QError: 1.33, RelError: -0.25, SelError: -15%
+histogram: 12
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 12
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -160,10 +181,14 @@ histogramIdx: 12 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 13
Cardinality estimates:
-heuristic: 9 QError: 1.44, RelError: -0.31, SelError: -20%
-heuristicIdx: 9 QError: 1.44, RelError: -0.31, SelError: -20%
-histogram: 13 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1.44, RelError: -0.31, SelError: -20%
+heuristicIdx: 9
+QError: 1.44, RelError: -0.31, SelError: -20%
+histogram: 13
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 13
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -172,10 +197,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 10
Cardinality estimates:
-heuristic: 6.6 QError: 1.52, RelError: -0.34, SelError: -17%
-heuristicIdx: 6.6 QError: 1.52, RelError: -0.34, SelError: -17%
-histogram: 10 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.52, RelError: -0.34, SelError: -17%
+heuristicIdx: 6.6
+QError: 1.52, RelError: -0.34, SelError: -17%
+histogram: 10
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 10
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -184,10 +213,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 13
Cardinality estimates:
-heuristic: 6.6 QError: 1.97, RelError: -0.49, SelError: -32%
-heuristicIdx: 6.6 QError: 1.97, RelError: -0.49, SelError: -32%
-histogram: 13 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.97, RelError: -0.49, SelError: -32%
+heuristicIdx: 6.6
+QError: 1.97, RelError: -0.49, SelError: -32%
+histogram: 13
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 13
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -196,10 +229,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+heuristicIdx: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -208,10 +245,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -220,10 +261,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-histogram: 18 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+heuristicIdx: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+histogram: 18
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 18
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -232,10 +277,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 19
Cardinality estimates:
-heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-histogram: 19 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+heuristicIdx: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+histogram: 19
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 19
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -244,10 +293,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 9 QError: 9, RelError: 8, SelError: 40%
-heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 9, RelError: 8, SelError: 40%
+heuristicIdx: 9
+QError: 9, RelError: 8, SelError: 40%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -256,10 +309,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+heuristicIdx: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -268,10 +325,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 3.69 QError: 3.69, RelError: 2.69, SelError: 13.45%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 3.69
+QError: 3.69, RelError: 2.69, SelError: 13.45%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -280,10 +341,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 7.75
+QError: 3.88, RelError: 2.88, SelError: 28.75%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -292,10 +357,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 7.75
+QError: 3.88, RelError: 2.88, SelError: 28.75%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -304,10 +373,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 6.93 QError: 2.31, RelError: 1.31, SelError: 19.65%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 6.93
+QError: 2.31, RelError: 1.31, SelError: 19.65%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -316,10 +389,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 1.95 QError: 1.95, RelError: 0.95, SelError: 4.75%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 1.95
+QError: 1.95, RelError: 0.95, SelError: 4.75%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -328,10 +405,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -340,10 +421,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -352,10 +437,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -364,10 +453,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 10
Cardinality estimates:
-heuristic: 6.6 QError: 1.52, RelError: -0.34, SelError: -17%
-heuristicIdx: 6.6 QError: 1.52, RelError: -0.34, SelError: -17%
-histogram: 10 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.52, RelError: -0.34, SelError: -17%
+heuristicIdx: 6.6
+QError: 1.52, RelError: -0.34, SelError: -17%
+histogram: 10
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 10
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -376,10 +469,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 11
Cardinality estimates:
-heuristic: 6.6 QError: 1.67, RelError: -0.4, SelError: -22%
-heuristicIdx: 6.6 QError: 1.67, RelError: -0.4, SelError: -22%
-histogram: 11 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 11 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.67, RelError: -0.4, SelError: -22%
+heuristicIdx: 6.6
+QError: 1.67, RelError: -0.4, SelError: -22%
+histogram: 11
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 11
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -388,10 +485,14 @@ histogramIdx: 11 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 9 QError: 1, RelError: 0, SelError: 0%
-heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0%
-histogram: 9 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1, RelError: 0, SelError: 0%
+heuristicIdx: 9
+QError: 1, RelError: 0, SelError: 0%
+histogram: 9
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -400,10 +501,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 10
Cardinality estimates:
-heuristic: 9 QError: 1.11, RelError: -0.1, SelError: -5%
-heuristicIdx: 9 QError: 1.11, RelError: -0.1, SelError: -5%
-histogram: 10 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1.11, RelError: -0.1, SelError: -5%
+heuristicIdx: 9
+QError: 1.11, RelError: -0.1, SelError: -5%
+histogram: 10
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 10
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -412,10 +517,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 13
Cardinality estimates:
-heuristic: 6.6 QError: 1.97, RelError: -0.49, SelError: -32%
-heuristicIdx: 6.6 QError: 1.97, RelError: -0.49, SelError: -32%
-histogram: 13 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.97, RelError: -0.49, SelError: -32%
+heuristicIdx: 6.6
+QError: 1.97, RelError: -0.49, SelError: -32%
+histogram: 13
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 13
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -424,10 +533,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-histogram: 18 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+heuristicIdx: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+histogram: 18
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 18
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -436,10 +549,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 9 QError: 9, RelError: 8, SelError: 40%
-heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 9, RelError: 8, SelError: 40%
+heuristicIdx: 9
+QError: 9, RelError: 8, SelError: 40%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -448,10 +565,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -460,10 +581,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 19
Cardinality estimates:
-heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-histogram: 19 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+heuristicIdx: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+histogram: 19
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 19
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -472,10 +597,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 20
Cardinality estimates:
-heuristic: 6.6 QError: 3.03, RelError: -0.67, SelError: -67%
-heuristicIdx: 6.6 QError: 3.03, RelError: -0.67, SelError: -67%
-histogram: 20 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 20 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 3.03, RelError: -0.67, SelError: -67%
+heuristicIdx: 6.6
+QError: 3.03, RelError: -0.67, SelError: -67%
+histogram: 20
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 20
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -484,10 +613,14 @@ histogramIdx: 20 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 9 QError: 9, RelError: 0.9, SelError: 45%
-heuristicIdx: 9 QError: 9, RelError: 0.9, SelError: 45%
-histogram: 0 QError: 0, RelError: 0, SelError: 0%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 9, RelError: 0.9, SelError: 45%
+heuristicIdx: 9
+QError: 9, RelError: 0.9, SelError: 45%
+histogram: 0
+QError: 0, RelError: 0, SelError: 0%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -496,10 +629,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 9 QError: 9, RelError: 8, SelError: 40%
-heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 9, RelError: 8, SelError: 40%
+heuristicIdx: 9
+QError: 9, RelError: 8, SelError: 40%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -508,10 +645,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 3.69 QError: 3.69, RelError: 2.69, SelError: 13.45%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 3.69
+QError: 3.69, RelError: 2.69, SelError: 13.45%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -520,10 +661,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 6.67 QError: 6.67, RelError: 0.67, SelError: 33.35%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 6.67
+QError: 6.67, RelError: 0.67, SelError: 33.35%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -532,10 +677,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 5.86 QError: 5.86, RelError: 4.86, SelError: 24.3%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 5.86
+QError: 5.86, RelError: 4.86, SelError: 24.3%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -544,10 +693,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 2.85 QError: 2.85, RelError: 1.85, SelError: 9.25%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 2.85
+QError: 2.85, RelError: 1.85, SelError: 9.25%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -556,10 +709,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -568,10 +725,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 0 QError: 0, RelError: 0, SelError: 0%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 0
+QError: 0, RelError: 0, SelError: 0%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -580,10 +741,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 3.79
+QError: 1.9, RelError: 0.9, SelError: 8.95%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -592,10 +757,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 7.42 QError: 7.42, RelError: 6.42, SelError: 32.1%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 7.42
+QError: 7.42, RelError: 6.42, SelError: 32.1%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -604,10 +773,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 5.02 QError: 5.02, RelError: 0.5, SelError: 25.1%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 5.02
+QError: 5.02, RelError: 0.5, SelError: 25.1%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -616,10 +789,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 1.9 QError: 1.9, RelError: 0.19, SelError: 9.5%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 1.9
+QError: 1.9, RelError: 0.19, SelError: 9.5%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -628,10 +805,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -640,10 +821,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -652,10 +837,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 4.61 QError: 2.31, RelError: 1.31, SelError: 13.05%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 4.61
+QError: 2.31, RelError: 1.31, SelError: 13.05%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -664,10 +853,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 6.97 QError: 6.97, RelError: 5.97, SelError: 29.85%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 6.97
+QError: 6.97, RelError: 5.97, SelError: 29.85%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -676,10 +869,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 6.26 QError: 2.09, RelError: 1.09, SelError: 16.3%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 6.26
+QError: 2.09, RelError: 1.09, SelError: 16.3%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -688,10 +885,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -700,10 +901,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 3.43, SelError: 17.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 5.6, SelError: 28%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -712,10 +917,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 0 QError: 0, RelError: 0, SelError: 0%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 0
+QError: 0, RelError: 0, SelError: 0%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -724,10 +933,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35%
-heuristicIdx: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 2.24, RelError: 1.23, SelError: 12.35%
+heuristicIdx: 4.47
+QError: 2.24, RelError: 1.23, SelError: 12.35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -736,10 +949,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35%
-heuristicIdx: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 2.24, RelError: 1.23, SelError: 12.35%
+heuristicIdx: 4.47
+QError: 2.24, RelError: 1.23, SelError: 12.35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -748,10 +965,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -760,10 +981,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -772,10 +997,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 9 QError: 2, RelError: -0.5, SelError: -45%
-heuristicIdx: 9 QError: 2, RelError: -0.5, SelError: -45%
-histogram: 18 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 2, RelError: -0.5, SelError: -45%
+heuristicIdx: 9
+QError: 2, RelError: -0.5, SelError: -45%
+histogram: 18
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 18
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -784,10 +1013,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 19
Cardinality estimates:
-heuristic: 9 QError: 2.11, RelError: -0.53, SelError: -50%
-heuristicIdx: 9 QError: 2.11, RelError: -0.53, SelError: -50%
-histogram: 19 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 2.11, RelError: -0.53, SelError: -50%
+heuristicIdx: 9
+QError: 2.11, RelError: -0.53, SelError: -50%
+histogram: 19
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 19
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -796,10 +1029,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -808,10 +1045,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 6.6 QError: 1.1, RelError: 0.1, SelError: 3%
-heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3%
-histogram: 6 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.1, RelError: 0.1, SelError: 3%
+heuristicIdx: 6.6
+QError: 1.1, RelError: 0.1, SelError: 3%
+histogram: 6
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 6
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -820,10 +1061,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 10
Cardinality estimates:
-heuristic: 9 QError: 1.11, RelError: -0.1, SelError: -5%
-heuristicIdx: 9 QError: 1.11, RelError: -0.1, SelError: -5%
-histogram: 10 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1.11, RelError: -0.1, SelError: -5%
+heuristicIdx: 9
+QError: 1.11, RelError: -0.1, SelError: -5%
+histogram: 10
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 10
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -832,10 +1077,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.47 QError: 1.49, RelError: 0.49, SelError: 7.35%
-heuristicIdx: 4.47 QError: 1.49, RelError: 0.49, SelError: 7.35%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 1.49, RelError: 0.49, SelError: 7.35%
+heuristicIdx: 4.47
+QError: 1.49, RelError: 0.49, SelError: 7.35%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -844,10 +1093,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 12
Cardinality estimates:
-heuristic: 6.6 QError: 1.82, RelError: -0.45, SelError: -27%
-heuristicIdx: 6.6 QError: 1.82, RelError: -0.45, SelError: -27%
-histogram: 12 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 12 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.82, RelError: -0.45, SelError: -27%
+heuristicIdx: 6.6
+QError: 1.82, RelError: -0.45, SelError: -27%
+histogram: 12
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 12
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -856,10 +1109,14 @@ histogramIdx: 12 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 14
Cardinality estimates:
-heuristic: 6.6 QError: 2.12, RelError: -0.53, SelError: -37%
-heuristicIdx: 6.6 QError: 2.12, RelError: -0.53, SelError: -37%
-histogram: 14 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 14 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.12, RelError: -0.53, SelError: -37%
+heuristicIdx: 6.6
+QError: 2.12, RelError: -0.53, SelError: -37%
+histogram: 14
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 14
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -868,10 +1125,14 @@ histogramIdx: 14 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 9 QError: 1, RelError: 0, SelError: 0%
-heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0%
-histogram: 9 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1, RelError: 0, SelError: 0%
+heuristicIdx: 9
+QError: 1, RelError: 0, SelError: 0%
+histogram: 9
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -880,10 +1141,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 9 QError: 1, RelError: 0, SelError: 0%
-heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0%
-histogram: 9 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1, RelError: 0, SelError: 0%
+heuristicIdx: 9
+QError: 1, RelError: 0, SelError: 0%
+histogram: 9
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -892,10 +1157,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 9 QError: 1, RelError: 0, SelError: 0%
-heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0%
-histogram: 9 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1, RelError: 0, SelError: 0%
+heuristicIdx: 9
+QError: 1, RelError: 0, SelError: 0%
+histogram: 9
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -904,10 +1173,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65%
-heuristicIdx: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65%
-histogram: 5 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 1.12, RelError: -0.11, SelError: -2.65%
+heuristicIdx: 4.47
+QError: 1.12, RelError: -0.11, SelError: -2.65%
+histogram: 5
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 5
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -916,10 +1189,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 16
Cardinality estimates:
-heuristic: 6.6 QError: 2.42, RelError: -0.59, SelError: -47%
-heuristicIdx: 6.6 QError: 2.42, RelError: -0.59, SelError: -47%
-histogram: 16 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 16 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.42, RelError: -0.59, SelError: -47%
+heuristicIdx: 6.6
+QError: 2.42, RelError: -0.59, SelError: -47%
+histogram: 16
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 16
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -928,10 +1205,14 @@ histogramIdx: 16 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65%
-heuristicIdx: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65%
-histogram: 5 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 1.12, RelError: -0.11, SelError: -2.65%
+heuristicIdx: 4.47
+QError: 1.12, RelError: -0.11, SelError: -2.65%
+histogram: 5
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 5
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -940,10 +1221,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57%
-histogram: 18 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+heuristicIdx: 6.6
+QError: 2.73, RelError: -0.63, SelError: -57%
+histogram: 18
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 18
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -952,10 +1237,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 19
Cardinality estimates:
-heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62%
-histogram: 19 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+heuristicIdx: 6.6
+QError: 2.88, RelError: -0.65, SelError: -62%
+histogram: 19
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 19
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -964,10 +1253,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 9 QError: 2.25, RelError: 1.25, SelError: 25%
-heuristicIdx: 9 QError: 2.25, RelError: 1.25, SelError: 25%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 2.25, RelError: 1.25, SelError: 25%
+heuristicIdx: 9
+QError: 2.25, RelError: 1.25, SelError: 25%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -976,10 +1269,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 9 QError: 1.5, RelError: 0.5, SelError: 15%
-heuristicIdx: 9 QError: 1.5, RelError: 0.5, SelError: 15%
-histogram: 6 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 1.5, RelError: 0.5, SelError: 15%
+heuristicIdx: 9
+QError: 1.5, RelError: 0.5, SelError: 15%
+histogram: 6
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 6
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -988,10 +1285,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 20
Cardinality estimates:
-heuristic: 6.6 QError: 3.03, RelError: -0.67, SelError: -67%
-heuristicIdx: 6.6 QError: 3.03, RelError: -0.67, SelError: -67%
-histogram: 20 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 20 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 3.03, RelError: -0.67, SelError: -67%
+heuristicIdx: 6.6
+QError: 3.03, RelError: -0.67, SelError: -67%
+histogram: 20
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 20
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1000,10 +1301,14 @@ histogramIdx: 20 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-histogram: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6%
-histogramIdx: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+histogram: 2.92
+QError: 1.46, RelError: 0.46, SelError: 4.6%
+histogramIdx: 2.92
+QError: 1.46, RelError: 0.46, SelError: 4.6%
[jsTest] ----
@@ -1012,10 +1317,14 @@ histogramIdx: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-histogram: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
-histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+histogram: 1.63
+QError: 1.23, RelError: -0.19, SelError: -1.85%
+histogramIdx: 1.63
+QError: 1.23, RelError: -0.19, SelError: -1.85%
[jsTest] ----
@@ -1024,10 +1333,14 @@ histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-histogram: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
-histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+histogram: 4.74
+QError: 1.58, RelError: 0.58, SelError: 8.7%
+histogramIdx: 4.74
+QError: 1.58, RelError: 0.58, SelError: 8.7%
[jsTest] ----
@@ -1036,10 +1349,14 @@ histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-histogram: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
-histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+histogram: 4.08
+QError: 1.36, RelError: 0.36, SelError: 5.4%
+histogramIdx: 4.08
+QError: 1.36, RelError: 0.36, SelError: 5.4%
[jsTest] ----
@@ -1048,10 +1365,14 @@ histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-histogram: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
-histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+histogram: 4.74
+QError: 1.58, RelError: 0.58, SelError: 8.7%
+histogramIdx: 4.74
+QError: 1.58, RelError: 0.58, SelError: 8.7%
[jsTest] ----
@@ -1060,10 +1381,14 @@ histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15%
-histogram: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
-histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
+heuristic: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+heuristicIdx: 4.43
+QError: 1.48, RelError: 0.48, SelError: 7.15%
+histogram: 4.08
+QError: 1.36, RelError: 0.36, SelError: 5.4%
+histogramIdx: 4.08
+QError: 1.36, RelError: 0.36, SelError: 5.4%
[jsTest] ----
@@ -1072,10 +1397,14 @@ histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85%
-heuristicIdx: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85%
-histogram: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7%
-histogramIdx: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7%
+heuristic: 4.43
+QError: 1.58, RelError: -0.37, SelError: -12.85%
+heuristicIdx: 4.43
+QError: 1.58, RelError: -0.37, SelError: -12.85%
+histogram: 8.54
+QError: 1.22, RelError: 0.22, SelError: 7.7%
+histogramIdx: 8.54
+QError: 1.22, RelError: 0.22, SelError: 7.7%
[jsTest] ----
@@ -1084,10 +1413,14 @@ histogramIdx: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85%
-heuristicIdx: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85%
-histogram: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95%
-histogramIdx: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95%
+heuristic: 4.43
+QError: 1.58, RelError: -0.37, SelError: -12.85%
+heuristicIdx: 4.43
+QError: 1.58, RelError: -0.37, SelError: -12.85%
+histogram: 8.99
+QError: 1.28, RelError: 0.28, SelError: 9.95%
+histogramIdx: 8.99
+QError: 1.28, RelError: 0.28, SelError: 9.95%
[jsTest] ----
@@ -1096,10 +1429,14 @@ histogramIdx: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75%
-histogramIdx: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+histogram: 7.75
+QError: 3.88, RelError: 2.88, SelError: 28.75%
+histogramIdx: 7.75
+QError: 3.88, RelError: 2.88, SelError: 28.75%
[jsTest] ----
@@ -1108,10 +1445,14 @@ histogramIdx: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-histogram: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
-histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+histogram: 1.63
+QError: 1.23, RelError: -0.19, SelError: -1.85%
+histogramIdx: 1.63
+QError: 1.23, RelError: -0.19, SelError: -1.85%
[jsTest] ----
@@ -1120,10 +1461,14 @@ histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
-histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 8.05
+QError: 1.61, RelError: 0.61, SelError: 15.25%
+histogramIdx: 8.05
+QError: 1.61, RelError: 0.61, SelError: 15.25%
[jsTest] ----
@@ -1132,10 +1477,14 @@ histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
-histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 7.35
+QError: 1.47, RelError: 0.47, SelError: 11.75%
+histogramIdx: 7.35
+QError: 1.47, RelError: 0.47, SelError: 11.75%
[jsTest] ----
@@ -1144,10 +1493,14 @@ histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
-histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 8.05
+QError: 1.61, RelError: 0.61, SelError: 15.25%
+histogramIdx: 8.05
+QError: 1.61, RelError: 0.61, SelError: 15.25%
[jsTest] ----
@@ -1156,10 +1509,14 @@ histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
-histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 7.35
+QError: 1.47, RelError: 0.47, SelError: 11.75%
+histogramIdx: 7.35
+QError: 1.47, RelError: 0.47, SelError: 11.75%
[jsTest] ----
@@ -1168,10 +1525,14 @@ histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85%
-heuristicIdx: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85%
-histogram: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85%
-histogramIdx: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85%
+heuristic: 4.43
+QError: 1.81, RelError: -0.45, SelError: -17.85%
+heuristicIdx: 4.43
+QError: 1.81, RelError: -0.45, SelError: -17.85%
+histogram: 8.77
+QError: 1.1, RelError: 0.1, SelError: 3.85%
+histogramIdx: 8.77
+QError: 1.1, RelError: 0.1, SelError: 3.85%
[jsTest] ----
@@ -1180,10 +1541,14 @@ histogramIdx: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85%
-heuristicIdx: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85%
-histogram: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35%
-histogramIdx: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35%
+heuristic: 4.43
+QError: 1.81, RelError: -0.45, SelError: -17.85%
+heuristicIdx: 4.43
+QError: 1.81, RelError: -0.45, SelError: -17.85%
+histogram: 13.07
+QError: 1.63, RelError: 0.63, SelError: 25.35%
+histogramIdx: 13.07
+QError: 1.63, RelError: 0.63, SelError: 25.35%
[jsTest] ----
@@ -1192,10 +1557,14 @@ histogramIdx: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25%
-histogramIdx: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 5.85
+QError: 1.17, RelError: 0.17, SelError: 4.25%
+histogramIdx: 5.85
+QError: 1.17, RelError: 0.17, SelError: 4.25%
[jsTest] ----
@@ -1204,10 +1573,14 @@ histogramIdx: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogram: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6%
-histogramIdx: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogram: 4.08
+QError: 1.23, RelError: -0.18, SelError: -4.6%
+histogramIdx: 4.08
+QError: 1.23, RelError: -0.18, SelError: -4.6%
[jsTest] ----
@@ -1216,10 +1589,14 @@ histogramIdx: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1228,10 +1605,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1240,10 +1621,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1252,10 +1637,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1264,10 +1653,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1276,10 +1669,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1288,10 +1685,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 6.6 QError: 1.06, RelError: -0.06, SelError: -2%
-heuristicIdx: 6.6 QError: 1.06, RelError: -0.06, SelError: -2%
-histogram: 7 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 7 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.06, RelError: -0.06, SelError: -2%
+heuristicIdx: 6.6
+QError: 1.06, RelError: -0.06, SelError: -2%
+histogram: 7
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 7
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1300,10 +1701,14 @@ histogramIdx: 7 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-histogram: 8 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+heuristicIdx: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+histogram: 8
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 8
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1312,10 +1717,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+heuristicIdx: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1324,10 +1733,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 9 QError: 3, RelError: 2, SelError: 30%
-heuristicIdx: 9 QError: 3, RelError: 2, SelError: 30%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 3, RelError: 2, SelError: 30%
+heuristicIdx: 9
+QError: 3, RelError: 2, SelError: 30%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1336,10 +1749,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33%
-histogram: 0 QError: 0, RelError: 0, SelError: 0%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+heuristicIdx: 6.6
+QError: 6.6, RelError: 0.66, SelError: 33%
+histogram: 0
+QError: 0, RelError: 0, SelError: 0%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1348,10 +1765,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 3 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 3
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1360,10 +1781,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 9 QError: 2.25, RelError: 1.25, SelError: 25%
-heuristicIdx: 9 QError: 2.25, RelError: 1.25, SelError: 25%
-histogram: 4 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 2.25, RelError: 1.25, SelError: 25%
+heuristicIdx: 9
+QError: 2.25, RelError: 1.25, SelError: 25%
+histogram: 4
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1372,10 +1797,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+heuristicIdx: 4.47
+QError: 4.47, RelError: 3.47, SelError: 17.35%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1384,10 +1813,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7%
-histogram: 8 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+heuristicIdx: 6.6
+QError: 1.21, RelError: -0.18, SelError: -7%
+histogram: 8
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 8
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1396,10 +1829,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 6.6 QError: 1.36, RelError: -0.27, SelError: -12%
-heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12%
-histogram: 9 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 6.6
+QError: 1.36, RelError: -0.27, SelError: -12%
+heuristicIdx: 6.6
+QError: 1.36, RelError: -0.27, SelError: -12%
+histogram: 9
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1408,10 +1845,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 9 QError: 9, RelError: 8, SelError: 40%
-heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 9, RelError: 8, SelError: 40%
+heuristicIdx: 9
+QError: 9, RelError: 8, SelError: 40%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1420,10 +1861,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35%
-histogram: 2 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+heuristicIdx: 9
+QError: 4.5, RelError: 3.5, SelError: 35%
+histogram: 2
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1432,10 +1877,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 6.6 QError: 1.65, RelError: 0.65, SelError: 13%
-histogram: 4.14 QError: 1.03, RelError: 0.03, SelError: 0.7%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 6.6
+QError: 1.65, RelError: 0.65, SelError: 13%
+histogram: 4.14
+QError: 1.03, RelError: 0.03, SelError: 0.7%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1444,10 +1893,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 0.32
+QError: 0.32, RelError: 0.03, SelError: 1.6%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1456,10 +1909,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 1.41 QError: 1.42, RelError: -0.3, SelError: -2.95%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 1.41
+QError: 1.42, RelError: -0.3, SelError: -2.95%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1468,10 +1925,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 6.6 QError: 1.65, RelError: 0.65, SelError: 13%
-histogram: 3.79 QError: 1.06, RelError: -0.05, SelError: -1.05%
-histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 6.6
+QError: 1.65, RelError: 0.65, SelError: 13%
+histogram: 3.79
+QError: 1.06, RelError: -0.05, SelError: -1.05%
+histogramIdx: 4
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1480,10 +1941,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23%
-histogram: 2.53 QError: 1.26, RelError: 0.26, SelError: 2.65%
-histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 6.6
+QError: 3.3, RelError: 2.3, SelError: 23%
+histogram: 2.53
+QError: 1.26, RelError: 0.26, SelError: 2.65%
+histogramIdx: 2
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1492,10 +1957,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 0.59 QError: 0.59, RelError: 0.06, SelError: 2.95%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 0.59
+QError: 0.59, RelError: 0.06, SelError: 2.95%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1504,10 +1973,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 0.77 QError: 0.77, RelError: 0.08, SelError: 3.85%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 0.77
+QError: 0.77, RelError: 0.08, SelError: 3.85%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1516,10 +1989,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85%
-heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3%
-histogram: 4.24 QError: 1.42, RelError: -0.29, SelError: -8.8%
-histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.35, RelError: -0.26, SelError: -7.85%
+heuristicIdx: 6.6
+QError: 1.1, RelError: 0.1, SelError: 3%
+histogram: 4.24
+QError: 1.42, RelError: -0.29, SelError: -8.8%
+histogramIdx: 6
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1528,10 +2005,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 5
Cardinality estimates:
-heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-heuristicIdx: 6.6 QError: 1.32, RelError: 0.32, SelError: 8%
-histogram: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85%
-histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+heuristicIdx: 6.6
+QError: 1.32, RelError: 0.32, SelError: 8%
+histogram: 4.43
+QError: 1.13, RelError: -0.11, SelError: -2.85%
+histogramIdx: 5
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1540,10 +2021,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 1.77 QError: 1.77, RelError: 0.18, SelError: 8.85%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 1.77
+QError: 1.77, RelError: 0.18, SelError: 8.85%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1552,10 +2037,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 1.5 QError: 1.5, RelError: 0.15, SelError: 7.5%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 4.43, RelError: 0.44, SelError: 22.15%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 1.5
+QError: 1.5, RelError: 0.15, SelError: 7.5%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1564,10 +2053,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 4.43 QError: 2.03, RelError: -0.51, SelError: -22.85%
-heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12%
-histogram: 6.36 QError: 1.42, RelError: -0.29, SelError: -13.2%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.03, RelError: -0.51, SelError: -22.85%
+heuristicIdx: 6.6
+QError: 1.36, RelError: -0.27, SelError: -12%
+histogram: 6.36
+QError: 1.42, RelError: -0.29, SelError: -13.2%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1576,10 +2069,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.11 QError: 2.11, RelError: 0.21, SelError: 10.55%
-heuristicIdx: 2.11 QError: 2.11, RelError: 0.21, SelError: 10.55%
-histogram: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1%
-histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1%
+heuristic: 2.11
+QError: 2.11, RelError: 0.21, SelError: 10.55%
+heuristicIdx: 2.11
+QError: 2.11, RelError: 0.21, SelError: 10.55%
+histogram: 0.22
+QError: 0.22, RelError: 0.02, SelError: 1.1%
+histogramIdx: 0.22
+QError: 0.22, RelError: 0.02, SelError: 1.1%
[jsTest] ----
@@ -1588,10 +2085,14 @@ histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5%
-heuristicIdx: 2.57 QError: 2.57, RelError: 0.26, SelError: 12.85%
-histogram: 0.59 QError: 0.59, RelError: 0.06, SelError: 2.95%
-histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1%
+heuristic: 2.1
+QError: 2.1, RelError: 0.21, SelError: 10.5%
+heuristicIdx: 2.57
+QError: 2.57, RelError: 0.26, SelError: 12.85%
+histogram: 0.59
+QError: 0.59, RelError: 0.06, SelError: 2.95%
+histogramIdx: 0.22
+QError: 0.22, RelError: 0.02, SelError: 1.1%
[jsTest] ----
@@ -1600,10 +2101,14 @@ histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85%
-heuristicIdx: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85%
-histogram: 1 QError: 1, RelError: 0, SelError: 0%
-histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
+heuristic: 2.57
+QError: 2.57, RelError: 1.57, SelError: 7.85%
+heuristicIdx: 2.57
+QError: 2.57, RelError: 1.57, SelError: 7.85%
+histogram: 1
+QError: 1, RelError: 0, SelError: 0%
+histogramIdx: 1
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1612,10 +2117,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 3 QError: 3, RelError: 0.3, SelError: 15%
-heuristicIdx: 3 QError: 3, RelError: 0.3, SelError: 15%
-histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
-histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
+heuristic: 3
+QError: 3, RelError: 0.3, SelError: 15%
+heuristicIdx: 3
+QError: 3, RelError: 0.3, SelError: 15%
+histogram: 0.32
+QError: 0.32, RelError: 0.03, SelError: 1.6%
+histogramIdx: 0.32
+QError: 0.32, RelError: 0.03, SelError: 1.6%
[jsTest] ----
@@ -1624,10 +2133,14 @@ histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85%
-heuristicIdx: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85%
-histogram: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5%
-histogramIdx: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5%
+heuristic: 2.57
+QError: 1.29, RelError: 0.28, SelError: 2.85%
+heuristicIdx: 2.57
+QError: 1.29, RelError: 0.28, SelError: 2.85%
+histogram: 1.9
+QError: 1.05, RelError: -0.05, SelError: -0.5%
+histogramIdx: 1.9
+QError: 1.05, RelError: -0.05, SelError: -0.5%
[jsTest] ----
@@ -1636,10 +2149,14 @@ histogramIdx: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 3.11 QError: 1.04, RelError: 0.04, SelError: 0.55%
-heuristicIdx: 3.11 QError: 1.04, RelError: 0.04, SelError: 0.55%
-histogram: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1%
-histogramIdx: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1%
+heuristic: 3.11
+QError: 1.04, RelError: 0.04, SelError: 0.55%
+heuristicIdx: 3.11
+QError: 1.04, RelError: 0.04, SelError: 0.55%
+histogram: 4.62
+QError: 1.54, RelError: 0.54, SelError: 8.1%
+histogramIdx: 4.62
+QError: 1.54, RelError: 0.54, SelError: 8.1%
[jsTest] ----
@@ -1648,10 +2165,14 @@ histogramIdx: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1%
Actual cardinality: 1
Cardinality estimates:
-heuristic: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85%
-heuristicIdx: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85%
-histogram: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25%
-histogramIdx: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25%
+heuristic: 2.57
+QError: 2.57, RelError: 1.57, SelError: 7.85%
+heuristicIdx: 2.57
+QError: 2.57, RelError: 1.57, SelError: 7.85%
+histogram: 0.95
+QError: 1.05, RelError: -0.05, SelError: -0.25%
+histogramIdx: 0.95
+QError: 1.05, RelError: -0.05, SelError: -0.25%
[jsTest] ----
@@ -1660,10 +2181,14 @@ histogramIdx: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.57 QError: 2.57, RelError: 0.26, SelError: 12.85%
-heuristicIdx: 2.57 QError: 2.57, RelError: 0.26, SelError: 12.85%
-histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
-histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
+heuristic: 2.57
+QError: 2.57, RelError: 0.26, SelError: 12.85%
+heuristicIdx: 2.57
+QError: 2.57, RelError: 0.26, SelError: 12.85%
+histogram: 0.32
+QError: 0.32, RelError: 0.03, SelError: 1.6%
+histogramIdx: 0.32
+QError: 0.32, RelError: 0.03, SelError: 1.6%
[jsTest] ----
@@ -1672,10 +2197,14 @@ histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5%
-heuristicIdx: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5%
-histogram: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25%
-histogramIdx: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25%
+heuristic: 2.1
+QError: 2.1, RelError: 0.21, SelError: 10.5%
+heuristicIdx: 2.1
+QError: 2.1, RelError: 0.21, SelError: 10.5%
+histogram: 0.65
+QError: 0.65, RelError: 0.07, SelError: 3.25%
+histogramIdx: 0.65
+QError: 0.65, RelError: 0.07, SelError: 3.25%
[jsTest] ----
@@ -1684,10 +2213,14 @@ histogramIdx: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 1.21 QError: 1.21, RelError: 0.12, SelError: 6.05%
-heuristicIdx: 1.21 QError: 1.21, RelError: 0.12, SelError: 6.05%
-histogram: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4%
-histogramIdx: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4%
+heuristic: 1.21
+QError: 1.21, RelError: 0.12, SelError: 6.05%
+heuristicIdx: 1.21
+QError: 1.21, RelError: 0.12, SelError: 6.05%
+histogram: 0.08
+QError: 0.08, RelError: 0.01, SelError: 0.4%
+histogramIdx: 0.08
+QError: 0.08, RelError: 0.01, SelError: 0.4%
[jsTest] ----
@@ -1696,10 +2229,14 @@ histogramIdx: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85%
-heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3%
-histogram: 7.59 QError: 1.26, RelError: 0.26, SelError: 7.95%
-histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.35, RelError: -0.26, SelError: -7.85%
+heuristicIdx: 6.6
+QError: 1.1, RelError: 0.1, SelError: 3%
+histogram: 7.59
+QError: 1.26, RelError: 0.26, SelError: 7.95%
+histogramIdx: 6
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1708,10 +2245,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 2.81 QError: 1.07, RelError: -0.06, SelError: -0.95%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 7.09 QError: 2.36, RelError: 1.36, SelError: 20.45%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 2.81
+QError: 1.07, RelError: -0.06, SelError: -0.95%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 7.09
+QError: 2.36, RelError: 1.36, SelError: 20.45%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1720,10 +2261,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 16
Cardinality estimates:
-heuristic: 11 QError: 1.45, RelError: -0.31, SelError: -25%
-heuristicIdx: 11 QError: 1.45, RelError: -0.31, SelError: -25%
-histogram: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25%
-histogramIdx: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25%
+heuristic: 11
+QError: 1.45, RelError: -0.31, SelError: -25%
+heuristicIdx: 11
+QError: 1.45, RelError: -0.31, SelError: -25%
+histogram: 13.55
+QError: 1.18, RelError: -0.15, SelError: -12.25%
+histogramIdx: 13.55
+QError: 1.18, RelError: -0.15, SelError: -12.25%
[jsTest] ----
@@ -1732,10 +2277,14 @@ histogramIdx: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25%
Actual cardinality: 14
Cardinality estimates:
-heuristic: 10.31 QError: 1.36, RelError: -0.26, SelError: -18.45%
-heuristicIdx: 10.31 QError: 1.36, RelError: -0.26, SelError: -18.45%
-histogram: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1%
-histogramIdx: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1%
+heuristic: 10.31
+QError: 1.36, RelError: -0.26, SelError: -18.45%
+heuristicIdx: 10.31
+QError: 1.36, RelError: -0.26, SelError: -18.45%
+histogram: 13.18
+QError: 1.06, RelError: -0.06, SelError: -4.1%
+histogramIdx: 13.18
+QError: 1.06, RelError: -0.06, SelError: -4.1%
[jsTest] ----
@@ -1744,10 +2293,14 @@ histogramIdx: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1%
Actual cardinality: 12
Cardinality estimates:
-heuristic: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95%
-heuristicIdx: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95%
-histogram: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
-histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
+heuristic: 11.81
+QError: 1.02, RelError: -0.02, SelError: -0.95%
+heuristicIdx: 11.81
+QError: 1.02, RelError: -0.02, SelError: -0.95%
+histogram: 8.83
+QError: 1.36, RelError: -0.26, SelError: -15.85%
+histogramIdx: 8.83
+QError: 1.36, RelError: -0.26, SelError: -15.85%
[jsTest] ----
@@ -1756,10 +2309,14 @@ histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 3.36 QError: 1.79, RelError: -0.44, SelError: -13.2%
-heuristicIdx: 9.03 QError: 1.51, RelError: 0.5, SelError: 15.15%
-histogram: 8.88 QError: 1.48, RelError: 0.48, SelError: 14.4%
-histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9%
+heuristic: 3.36
+QError: 1.79, RelError: -0.44, SelError: -13.2%
+heuristicIdx: 9.03
+QError: 1.51, RelError: 0.5, SelError: 15.15%
+histogram: 8.88
+QError: 1.48, RelError: 0.48, SelError: 14.4%
+histogramIdx: 4.82
+QError: 1.24, RelError: -0.2, SelError: -5.9%
[jsTest] ----
@@ -1768,10 +2325,14 @@ histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9%
Actual cardinality: 9
Cardinality estimates:
-heuristic: 4.43 QError: 2.03, RelError: -0.51, SelError: -22.85%
-heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12%
-histogram: 10.44 QError: 1.16, RelError: 0.16, SelError: 7.2%
-histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 2.03, RelError: -0.51, SelError: -22.85%
+heuristicIdx: 6.6
+QError: 1.36, RelError: -0.27, SelError: -12%
+histogram: 10.44
+QError: 1.16, RelError: 0.16, SelError: 7.2%
+histogramIdx: 9
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1780,10 +2341,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 3
Cardinality estimates:
-heuristic: 2.81 QError: 1.07, RelError: -0.06, SelError: -0.95%
-heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18%
-histogram: 7.64 QError: 2.55, RelError: 1.55, SelError: 23.2%
-histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
+heuristic: 2.81
+QError: 1.07, RelError: -0.06, SelError: -0.95%
+heuristicIdx: 6.6
+QError: 2.2, RelError: 1.2, SelError: 18%
+histogram: 7.64
+QError: 2.55, RelError: 1.55, SelError: 23.2%
+histogramIdx: 3
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1792,10 +2357,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 13
Cardinality estimates:
-heuristic: 11 QError: 1.18, RelError: -0.15, SelError: -10%
-heuristicIdx: 11 QError: 1.18, RelError: -0.15, SelError: -10%
-histogram: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1%
-histogramIdx: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1%
+heuristic: 11
+QError: 1.18, RelError: -0.15, SelError: -10%
+heuristicIdx: 11
+QError: 1.18, RelError: -0.15, SelError: -10%
+histogram: 10.78
+QError: 1.21, RelError: -0.17, SelError: -11.1%
+histogramIdx: 10.78
+QError: 1.21, RelError: -0.17, SelError: -11.1%
[jsTest] ----
@@ -1804,10 +2373,14 @@ histogramIdx: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1%
Actual cardinality: 11
Cardinality estimates:
-heuristic: 10.31 QError: 1.07, RelError: -0.06, SelError: -3.45%
-heuristicIdx: 10.31 QError: 1.07, RelError: -0.06, SelError: -3.45%
-histogram: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75%
-histogramIdx: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75%
+heuristic: 10.31
+QError: 1.07, RelError: -0.06, SelError: -3.45%
+heuristicIdx: 10.31
+QError: 1.07, RelError: -0.06, SelError: -3.45%
+histogram: 10.25
+QError: 1.07, RelError: -0.07, SelError: -3.75%
+histogramIdx: 10.25
+QError: 1.07, RelError: -0.07, SelError: -3.75%
[jsTest] ----
@@ -1816,10 +2389,14 @@ histogramIdx: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 11.81 QError: 1.69, RelError: 0.69, SelError: 24.05%
-heuristicIdx: 11.81 QError: 1.69, RelError: 0.69, SelError: 24.05%
-histogram: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1%
-histogramIdx: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1%
+heuristic: 11.81
+QError: 1.69, RelError: 0.69, SelError: 24.05%
+heuristicIdx: 11.81
+QError: 1.69, RelError: 0.69, SelError: 24.05%
+histogram: 4.18
+QError: 1.67, RelError: -0.4, SelError: -14.1%
+histogramIdx: 4.18
+QError: 1.67, RelError: -0.4, SelError: -14.1%
[jsTest] ----
@@ -1828,10 +2405,14 @@ histogramIdx: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1%
Actual cardinality: 11
Cardinality estimates:
-heuristic: 3.36 QError: 3.27, RelError: -0.69, SelError: -38.2%
-heuristicIdx: 9.03 QError: 1.22, RelError: -0.18, SelError: -9.85%
-histogram: 10.57 QError: 1.04, RelError: -0.04, SelError: -2.15%
-histogramIdx: 8.37 QError: 1.31, RelError: -0.24, SelError: -13.15%
+heuristic: 3.36
+QError: 3.27, RelError: -0.69, SelError: -38.2%
+heuristicIdx: 9.03
+QError: 1.22, RelError: -0.18, SelError: -9.85%
+histogram: 10.57
+QError: 1.04, RelError: -0.04, SelError: -2.15%
+histogramIdx: 8.37
+QError: 1.31, RelError: -0.24, SelError: -13.15%
[jsTest] ----
@@ -1840,10 +2421,14 @@ histogramIdx: 8.37 QError: 1.31, RelError: -0.24, SelError: -13.15%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15%
-histogram: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45%
-histogramIdx: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45%
+heuristic: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+heuristicIdx: 4.43
+QError: 1.11, RelError: 0.11, SelError: 2.15%
+histogram: 5.69
+QError: 1.42, RelError: 0.42, SelError: 8.45%
+histogramIdx: 5.69
+QError: 1.42, RelError: 0.42, SelError: 8.45%
[jsTest] ----
@@ -1852,10 +2437,14 @@ histogramIdx: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45%
Actual cardinality: 8
Cardinality estimates:
-heuristic: 2.81 QError: 2.85, RelError: -0.65, SelError: -25.95%
-heuristicIdx: 2.81 QError: 2.85, RelError: -0.65, SelError: -25.95%
-histogram: 8.04 QError: 1, RelError: 0, SelError: 0.2%
-histogramIdx: 8.04 QError: 1, RelError: 0, SelError: 0.2%
+heuristic: 2.81
+QError: 2.85, RelError: -0.65, SelError: -25.95%
+heuristicIdx: 2.81
+QError: 2.85, RelError: -0.65, SelError: -25.95%
+histogram: 8.04
+QError: 1, RelError: 0, SelError: 0.2%
+histogramIdx: 8.04
+QError: 1, RelError: 0, SelError: 0.2%
[jsTest] ----
@@ -1864,10 +2453,14 @@ histogramIdx: 8.04 QError: 1, RelError: 0, SelError: 0.2%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15%
-histogram: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95%
-histogramIdx: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95%
+heuristic: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+heuristicIdx: 4.43
+QError: 2.22, RelError: 1.21, SelError: 12.15%
+histogram: 3.79
+QError: 1.9, RelError: 0.9, SelError: 8.95%
+histogramIdx: 3.79
+QError: 1.9, RelError: 0.9, SelError: 8.95%
[jsTest] ----
@@ -1876,10 +2469,14 @@ histogramIdx: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95%
Actual cardinality: 2
Cardinality estimates:
-heuristic: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85%
-heuristicIdx: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85%
-histogram: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05%
-histogramIdx: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05%
+heuristic: 2.57
+QError: 1.29, RelError: 0.28, SelError: 2.85%
+heuristicIdx: 2.57
+QError: 1.29, RelError: 0.28, SelError: 2.85%
+histogram: 1.79
+QError: 1.12, RelError: -0.1, SelError: -1.05%
+histogramIdx: 1.79
+QError: 1.12, RelError: -0.1, SelError: -1.05%
[jsTest] ----
@@ -1888,10 +2485,14 @@ histogramIdx: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05%
-heuristicIdx: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05%
-histogram: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2%
-histogramIdx: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2%
+heuristic: 2.81
+QError: 2.81, RelError: 0.28, SelError: 14.05%
+heuristicIdx: 2.81
+QError: 2.81, RelError: 0.28, SelError: 14.05%
+histogram: 4.04
+QError: 4.04, RelError: 0.4, SelError: 20.2%
+histogramIdx: 4.04
+QError: 4.04, RelError: 0.4, SelError: 20.2%
[jsTest] ----
@@ -1900,10 +2501,14 @@ histogramIdx: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2%
Actual cardinality: 20
Cardinality estimates:
-heuristic: 11 QError: 1.82, RelError: -0.45, SelError: -45%
-heuristicIdx: 11 QError: 1.82, RelError: -0.45, SelError: -45%
-histogram: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95%
-histogramIdx: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95%
+heuristic: 11
+QError: 1.82, RelError: -0.45, SelError: -45%
+heuristicIdx: 11
+QError: 1.82, RelError: -0.45, SelError: -45%
+histogram: 18.21
+QError: 1.1, RelError: -0.09, SelError: -8.95%
+histogramIdx: 18.21
+QError: 1.1, RelError: -0.09, SelError: -8.95%
[jsTest] ----
@@ -1912,10 +2517,14 @@ histogramIdx: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 10.31 QError: 1.75, RelError: -0.43, SelError: -38.45%
-heuristicIdx: 10.31 QError: 1.75, RelError: -0.43, SelError: -38.45%
-histogram: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5%
-histogramIdx: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5%
+heuristic: 10.31
+QError: 1.75, RelError: -0.43, SelError: -38.45%
+heuristicIdx: 10.31
+QError: 1.75, RelError: -0.43, SelError: -38.45%
+histogram: 18.1
+QError: 1.01, RelError: 0.01, SelError: 0.5%
+histogramIdx: 18.1
+QError: 1.01, RelError: 0.01, SelError: 0.5%
[jsTest] ----
@@ -1924,10 +2533,14 @@ histogramIdx: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5%
Actual cardinality: 18
Cardinality estimates:
-heuristic: 11.81 QError: 1.52, RelError: -0.34, SelError: -30.95%
-heuristicIdx: 11.81 QError: 1.52, RelError: -0.34, SelError: -30.95%
-histogram: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3%
-histogramIdx: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3%
+heuristic: 11.81
+QError: 1.52, RelError: -0.34, SelError: -30.95%
+heuristicIdx: 11.81
+QError: 1.52, RelError: -0.34, SelError: -30.95%
+histogram: 10.74
+QError: 1.68, RelError: -0.4, SelError: -36.3%
+histogramIdx: 10.74
+QError: 1.68, RelError: -0.4, SelError: -36.3%
[jsTest] ----
@@ -1936,10 +2549,14 @@ histogramIdx: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3%
Actual cardinality: 7
Cardinality estimates:
-heuristic: 3.36 QError: 2.08, RelError: -0.52, SelError: -18.2%
-heuristicIdx: 3.36 QError: 2.08, RelError: -0.52, SelError: -18.2%
-histogram: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25%
-histogramIdx: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25%
+heuristic: 3.36
+QError: 2.08, RelError: -0.52, SelError: -18.2%
+heuristicIdx: 3.36
+QError: 2.08, RelError: -0.52, SelError: -18.2%
+histogram: 9.45
+QError: 1.35, RelError: 0.35, SelError: 12.25%
+histogramIdx: 9.45
+QError: 1.35, RelError: 0.35, SelError: 12.25%
[jsTest] ----
@@ -1948,10 +2565,14 @@ histogramIdx: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85%
-heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3%
-histogram: 5.06 QError: 1.19, RelError: -0.16, SelError: -4.7%
-histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
+heuristic: 4.43
+QError: 1.35, RelError: -0.26, SelError: -7.85%
+heuristicIdx: 6.6
+QError: 1.1, RelError: 0.1, SelError: 3%
+histogram: 5.06
+QError: 1.19, RelError: -0.16, SelError: -4.7%
+histogramIdx: 6
+QError: 1, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1960,10 +2581,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0%
Actual cardinality: 0
Cardinality estimates:
-heuristic: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05%
-heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0%
-histogram: 0 QError: 0, RelError: 0, SelError: 0%
-histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
+heuristic: 2.81
+QError: 2.81, RelError: 0.28, SelError: 14.05%
+heuristicIdx: 0
+QError: 0, RelError: 0, SelError: 0%
+histogram: 0
+QError: 0, RelError: 0, SelError: 0%
+histogramIdx: 0
+QError: 0, RelError: 0, SelError: 0%
[jsTest] ----
@@ -1972,10 +2597,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 11 QError: 1.83, RelError: 0.83, SelError: 25%
-heuristicIdx: 11 QError: 1.83, RelError: 0.83, SelError: 25%
-histogram: 4.33 QError: 1.39, RelError: -0.28, SelError: -8.35%
-histogramIdx: 4.33 QError: 1.39, RelError: -0.28, SelError: -8.35%
+heuristic: 11
+QError: 1.83, RelError: 0.83, SelError: 25%
+heuristicIdx: 11
+QError: 1.83, RelError: 0.83, SelError: 25%
+histogram: 4.33
+QError: 1.39, RelError: -0.28, SelError: -8.35%
+histogramIdx: 4.33
+QError: 1.39, RelError: -0.28, SelError: -8.35%
[jsTest] ----
@@ -1984,10 +2613,14 @@ histogramIdx: 4.33 QError: 1.39, RelError: -0.28, SelError: -8.35%
Actual cardinality: 4
Cardinality estimates:
-heuristic: 10.31 QError: 2.58, RelError: 1.58, SelError: 31.55%
-heuristicIdx: 10.31 QError: 2.58, RelError: 1.58, SelError: 31.55%
-histogram: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85%
-histogramIdx: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85%
+heuristic: 10.31
+QError: 2.58, RelError: 1.58, SelError: 31.55%
+heuristicIdx: 10.31
+QError: 2.58, RelError: 1.58, SelError: 31.55%
+histogram: 3.43
+QError: 1.17, RelError: -0.14, SelError: -2.85%
+histogramIdx: 3.43
+QError: 1.17, RelError: -0.14, SelError: -2.85%
[jsTest] ----
@@ -1996,10 +2629,14 @@ histogramIdx: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85%
Actual cardinality: 12
Cardinality estimates:
-heuristic: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95%
-heuristicIdx: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95%
-histogram: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
-histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
+heuristic: 11.81
+QError: 1.02, RelError: -0.02, SelError: -0.95%
+heuristicIdx: 11.81
+QError: 1.02, RelError: -0.02, SelError: -0.95%
+histogram: 8.83
+QError: 1.36, RelError: -0.26, SelError: -15.85%
+histogramIdx: 8.83
+QError: 1.36, RelError: -0.26, SelError: -15.85%
[jsTest] ----
@@ -2008,10 +2645,14 @@ histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85%
Actual cardinality: 6
Cardinality estimates:
-heuristic: 4.88 QError: 1.23, RelError: -0.19, SelError: -5.6%
-heuristicIdx: 9.03 QError: 1.51, RelError: 0.5, SelError: 15.15%
-histogram: 4.24 QError: 1.42, RelError: -0.29, SelError: -8.8%
-histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9%
+heuristic: 4.88
+QError: 1.23, RelError: -0.19, SelError: -5.6%
+heuristicIdx: 9.03
+QError: 1.51, RelError: 0.5, SelError: 15.15%
+histogram: 4.24
+QError: 1.42, RelError: -0.29, SelError: -8.8%
+histogramIdx: 4.82
+QError: 1.24, RelError: -0.2, SelError: -5.9%
[jsTest] ----
@@ -2240,50 +2881,70 @@ histogramIdx:
[jsTest] Mean errors per strategy for predicate { "dtype" : { "$ne" : "array" } }::
[jsTest] ----
-heuristic: { "RMSQError" : 4.022, "RMSRelError" : 2.698, "meanSelError" : 21.329 }
-heuristicIdx: { "RMSQError" : 4.524, "RMSRelError" : 3.231, "meanSelError" : 23.669 }
-histogram: { "RMSQError" : 2.156, "RMSRelError" : 1.351, "meanSelError" : 4.712 }
-histogramIdx: { "RMSQError" : 0.92, "RMSRelError" : 0, "meanSelError" : 0 }
+heuristic:
+{ "RMSQError" : 4.022, "RMSRelError" : 2.698, "meanSelError" : 21.329 }
+heuristicIdx:
+{ "RMSQError" : 4.524, "RMSRelError" : 3.231, "meanSelError" : 23.669 }
+histogram:
+{ "RMSQError" : 2.156, "RMSRelError" : 1.351, "meanSelError" : 4.712 }
+histogramIdx:
+{ "RMSQError" : 0.92, "RMSRelError" : 0, "meanSelError" : 0 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "dtype" : "array" }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
-heuristicIdx: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
-histogram: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
-histogramIdx: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
+heuristic:
+{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
+heuristicIdx:
+{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
+histogram:
+{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
+histogramIdx:
+{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : true }, { "dtype" : "array" } ] }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
-heuristicIdx: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
-histogram: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
-histogramIdx: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
+heuristic:
+{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
+heuristicIdx:
+{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
+histogram:
+{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
+histogramIdx:
+{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : false }, { "dtype" : "array" } ] }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
-heuristicIdx: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
-histogram: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
-histogramIdx: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
+heuristic:
+{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
+heuristicIdx:
+{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
+histogram:
+{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
+histogramIdx:
+{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
[jsTest] ----
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 3.385, "RMSRelError" : 2.192, "meanSelError" : 19.222 }
-heuristicIdx: { "RMSQError" : 3.766, "RMSRelError" : 2.61, "meanSelError" : 20.706 }
-histogram: { "RMSQError" : 1.887, "RMSRelError" : 1.118, "meanSelError" : 4.41 }
-histogramIdx: { "RMSQError" : 1.072, "RMSRelError" : 0.303, "meanSelError" : 1.422 }
+heuristic:
+{ "RMSQError" : 3.385, "RMSRelError" : 2.192, "meanSelError" : 19.222 }
+heuristicIdx:
+{ "RMSQError" : 3.766, "RMSRelError" : 2.61, "meanSelError" : 20.706 }
+histogram:
+{ "RMSQError" : 1.887, "RMSRelError" : 1.118, "meanSelError" : 4.41 }
+histogramIdx:
+{ "RMSQError" : 1.072, "RMSRelError" : 0.303, "meanSelError" : 1.422 }
[jsTest] ----
@@ -2494,10 +3155,14 @@ histogramIdx:
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 1.931, "RMSRelError" : 0.597, "meanSelError" : 14.195 }
-heuristicIdx: { "RMSQError" : 1.851, "RMSRelError" : 0.648, "meanSelError" : 13.789 }
-histogram: { "RMSQError" : 1.417, "RMSRelError" : 0.432, "meanSelError" : 7.936 }
-histogramIdx: { "RMSQError" : 1.295, "RMSRelError" : 0.252, "meanSelError" : 6.159 }
+heuristic:
+{ "RMSQError" : 1.931, "RMSRelError" : 0.597, "meanSelError" : 14.195 }
+heuristicIdx:
+{ "RMSQError" : 1.851, "RMSRelError" : 0.648, "meanSelError" : 13.789 }
+histogram:
+{ "RMSQError" : 1.417, "RMSRelError" : 0.432, "meanSelError" : 7.936 }
+histogramIdx:
+{ "RMSQError" : 1.295, "RMSRelError" : 0.252, "meanSelError" : 6.159 }
[jsTest] ----
@@ -2562,11 +3227,18 @@ cardinality: 7, histogramIdx estimation: 9.45, errors: { "qError" : 1.35, "rel
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 3.11, "RMSRelError" : 1.943, "meanSelError" : 18.059 }
-heuristicIdx: { "RMSQError" : 3.42, "RMSRelError" : 2.31, "meanSelError" : 19.106 }
-histogram: { "RMSQError" : 1.789, "RMSRelError" : 1.002, "meanSelError" : 5.226 }
-histogramIdx: { "RMSQError" : 1.127, "RMSRelError" : 0.292, "meanSelError" : 2.518 }
-===============================================================================Errors excluding empty queries.Non-empty simple error entries: 111; complex error entries: 29
+heuristic:
+{ "RMSQError" : 3.11, "RMSRelError" : 1.943, "meanSelError" : 18.059 }
+heuristicIdx:
+{ "RMSQError" : 3.42, "RMSRelError" : 2.31, "meanSelError" : 19.106 }
+histogram:
+{ "RMSQError" : 1.789, "RMSRelError" : 1.002, "meanSelError" : 5.226 }
+histogramIdx:
+{ "RMSQError" : 1.127, "RMSRelError" : 0.292, "meanSelError" : 2.518 }
+===============================================================================
+Errors excluding empty queries.
+Non-empty simple error entries: 111; complex error entries: 29
+
[jsTest] ----
[jsTest] Aggregate errors for all simple predicate queries
@@ -2794,50 +3466,70 @@ histogramIdx:
[jsTest] Mean errors per strategy for predicate { "dtype" : { "$ne" : "array" } }::
[jsTest] ----
-heuristic: { "RMSQError" : 3.775, "RMSRelError" : 2.924, "meanSelError" : 20.669 }
-heuristicIdx: { "RMSQError" : 4.359, "RMSRelError" : 3.505, "meanSelError" : 24.29 }
-histogram: { "RMSQError" : 2.071, "RMSRelError" : 1.465, "meanSelError" : 4.164 }
-histogramIdx: { "RMSQError" : 1, "RMSRelError" : 0, "meanSelError" : 0 }
+heuristic:
+{ "RMSQError" : 3.775, "RMSRelError" : 2.924, "meanSelError" : 20.669 }
+heuristicIdx:
+{ "RMSQError" : 4.359, "RMSRelError" : 3.505, "meanSelError" : 24.29 }
+histogram:
+{ "RMSQError" : 2.071, "RMSRelError" : 1.465, "meanSelError" : 4.164 }
+histogramIdx:
+{ "RMSQError" : 1, "RMSRelError" : 0, "meanSelError" : 0 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "dtype" : "array" }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
-heuristicIdx: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
-histogram: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
-histogramIdx: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
+heuristic:
+{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
+heuristicIdx:
+{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 }
+histogram:
+{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
+histogramIdx:
+{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : true }, { "dtype" : "array" } ] }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
-heuristicIdx: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
-histogram: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
-histogramIdx: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
+heuristic:
+{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
+heuristicIdx:
+{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 }
+histogram:
+{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
+histogramIdx:
+{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 }
[jsTest] ----
[jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : false }, { "dtype" : "array" } ] }::
[jsTest] ----
-heuristic: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
-heuristicIdx: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
-histogram: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
-histogramIdx: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
+heuristic:
+{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
+heuristicIdx:
+{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 }
+histogram:
+{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
+histogramIdx:
+{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 }
[jsTest] ----
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 3.132, "RMSRelError" : 2.301, "meanSelError" : 18.602 }
-heuristicIdx: { "RMSQError" : 3.554, "RMSRelError" : 2.742, "meanSelError" : 20.755 }
-histogram: { "RMSQError" : 1.797, "RMSRelError" : 1.174, "meanSelError" : 4.052 }
-histogramIdx: { "RMSQError" : 1.128, "RMSRelError" : 0.319, "meanSelError" : 1.576 }
+heuristic:
+{ "RMSQError" : 3.132, "RMSRelError" : 2.301, "meanSelError" : 18.602 }
+heuristicIdx:
+{ "RMSQError" : 3.554, "RMSRelError" : 2.742, "meanSelError" : 20.755 }
+histogram:
+{ "RMSQError" : 1.797, "RMSRelError" : 1.174, "meanSelError" : 4.052 }
+histogramIdx:
+{ "RMSQError" : 1.128, "RMSRelError" : 0.319, "meanSelError" : 1.576 }
[jsTest] ----
@@ -3040,10 +3732,14 @@ histogramIdx:
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 1.779, "RMSRelError" : 0.662, "meanSelError" : 14.884 }
-heuristicIdx: { "RMSQError" : 1.726, "RMSRelError" : 0.722, "meanSelError" : 14.771 }
-histogram: { "RMSQError" : 1.401, "RMSRelError" : 0.482, "meanSelError" : 9.053 }
-histogramIdx: { "RMSQError" : 1.246, "RMSRelError" : 0.274, "meanSelError" : 6.85 }
+heuristic:
+{ "RMSQError" : 1.779, "RMSRelError" : 0.662, "meanSelError" : 14.884 }
+heuristicIdx:
+{ "RMSQError" : 1.726, "RMSRelError" : 0.722, "meanSelError" : 14.771 }
+histogram:
+{ "RMSQError" : 1.401, "RMSRelError" : 0.482, "meanSelError" : 9.053 }
+histogramIdx:
+{ "RMSQError" : 1.246, "RMSRelError" : 0.274, "meanSelError" : 6.85 }
[jsTest] ----
@@ -3098,10 +3794,14 @@ cardinality: 11, histogramIdx estimation: 8.37, errors: { "qError" : 1.31, "re
[jsTest] Mean errors per strategy for all queries:
[jsTest] ----
-heuristic: { "RMSQError" : 2.904, "RMSRelError" : 2.071, "meanSelError" : 17.832 }
-heuristicIdx: { "RMSQError" : 3.261, "RMSRelError" : 2.463, "meanSelError" : 19.515 }
-histogram: { "RMSQError" : 1.722, "RMSRelError" : 1.068, "meanSelError" : 5.088 }
-histogramIdx: { "RMSQError" : 1.154, "RMSRelError" : 0.31, "meanSelError" : 2.669 }
+heuristic:
+{ "RMSQError" : 2.904, "RMSRelError" : 2.071, "meanSelError" : 17.832 }
+heuristicIdx:
+{ "RMSQError" : 3.261, "RMSRelError" : 2.463, "meanSelError" : 19.515 }
+histogram:
+{ "RMSQError" : 1.722, "RMSRelError" : 1.068, "meanSelError" : 5.088 }
+histogramIdx:
+{ "RMSQError" : 1.154, "RMSRelError" : 0.31, "meanSelError" : 2.669 }
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/ce_mixed b/jstests/query_golden/expected_output/ce_mixed
index f7855be70c2b8..fe37f9023ff45 100644
--- a/jstests/query_golden/expected_output/ce_mixed
+++ b/jstests/query_golden/expected_output/ce_mixed
@@ -2821,6 +2821,7 @@
}
]
+
[jsTest] ----
[jsTest] Settings before: internalQueryCardinalityEstimatorMode: sampling, internalQueryFrameworkControl: forceBonsai
[jsTest] ----
@@ -2832,7 +2833,7 @@
[jsTest] ----
Histogram estimate: 150.
-Heuristic estimate: 17.320508075688778.
+Heuristic estimate: 17.32050807568877.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 150,
@@ -2845,15 +2846,16 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ true, true ]"
},
"rightChild" : {
@@ -2867,12 +2869,13 @@ Heuristic explain: {
}
}
+
[jsTest] ----
[jsTest] Query: { "likesPizza" : false } returned 150 documents.
[jsTest] ----
Histogram estimate: 150.
-Heuristic estimate: 17.320508075688778.
+Heuristic estimate: 17.32050807568877.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 150,
@@ -2885,15 +2888,16 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ false, false ]"
},
"rightChild" : {
@@ -2907,12 +2911,13 @@ Heuristic explain: {
}
}
+
[jsTest] ----
[jsTest] Query: { "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") } } returned 299 documents.
[jsTest] ----
Histogram estimate: 299.
-Heuristic estimate: 59.999999999999986.
+Heuristic estimate: 60.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 299,
@@ -2925,15 +2930,16 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\") ]"
},
"rightChild" : {
@@ -2947,12 +2953,13 @@ Heuristic explain: {
}
}
+
[jsTest] ----
[jsTest] Query: { "date" : { "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 179 documents.
[jsTest] ----
Histogram estimate: 179.33675564681724.
-Heuristic estimate: 59.999999999999986.
+Heuristic estimate: 60.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 179.33675564681724,
@@ -2965,15 +2972,16 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"interval" : "[ ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\"), ISODate(\"1979-12-06T00:00:00Z\") )"
},
"rightChild" : {
@@ -2987,12 +2995,13 @@ Heuristic explain: {
}
}
+
[jsTest] ----
[jsTest] Query: { "name" : { "$lte" : "Bob Bennet" } } returned 37 documents.
[jsTest] ----
Histogram estimate: 61.99999971987415.
-Heuristic estimate: 99.00000000000003.
+Heuristic estimate: 99.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 61.99999971987415,
@@ -3014,25 +3023,27 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 99.00000000000003,
+ "logicalCE" : 99,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 99.00000000000003,
+ "logicalCE" : 99,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 99.00000000000003
+ "logicalCE" : 99
}
}
}
+
[jsTest] ----
[jsTest] Query: { "favPizzaToppings" : "mushrooms" } returned 120 documents.
[jsTest] ----
Histogram estimate: 120.
-Heuristic estimate: 17.320508075688778.
+Heuristic estimate: 17.32050807568877.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 120,
@@ -3045,87 +3056,93 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi" } returned 62 documents.
[jsTest] ----
-Histogram and heuristic estimates were equal: 17.320508075688778.
+Histogram and heuristic estimates were equal: 17.32050807568877.
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Pacinos" } returned 113 documents.
[jsTest] ----
-Histogram and heuristic estimates were equal: 17.320508075688778.
+Histogram and heuristic estimates were equal: 17.32050807568877.
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
+
[jsTest] ----
[jsTest] Query: { "likesPizza" : true, "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 89 documents.
[jsTest] ----
Histogram estimate: 115.65144475323812.
-Heuristic estimate: 7.745966692414835.
+Heuristic estimate: 7.745966692414833.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 115.65144475323812,
@@ -3142,38 +3159,40 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ true, true ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 59.999999999999986
+ "logicalCE" : 60
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "likesPizza" : false, "name" : { "$lte" : "Bob Bennet" } } returned 17 documents.
[jsTest] ----
Histogram estimate: 43.84062023548705.
-Heuristic estimate: 9.949874371066203.
+Heuristic estimate: 9.949874371066198.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 43.84062023548705,
@@ -3199,38 +3218,40 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ false, false ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 99.00000000000003,
+ "logicalCE" : 99,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 99.00000000000003,
+ "logicalCE" : 99,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 99.00000000000003
+ "logicalCE" : 99
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "favPizzaToppings" : "mushrooms", "name" : { "$lte" : "Bob Bennet" } } returned 16 documents.
[jsTest] ----
Histogram estimate: 39.21224280892076.
-Heuristic estimate: 9.949874371066203.
+Heuristic estimate: 9.949874371066198.
Histogram explain: {
"nodeType" : "Root",
"logicalCE" : 39.21224280892076,
@@ -3256,275 +3277,447 @@ Histogram explain: {
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 9.949874371066203
+ "logicalCE" : 9.949874371066198
}
}
}
}
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "likesPizza" : true }, { "date" : { "$lt" : ISODate("1955-01-01T00:00:00Z") } } ] } returned 168 documents.
+[jsTest] ----
+
+Histogram estimate: 157.54345865842552.
+Heuristic estimate: 67.03119854910238.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 157.54345865842552,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 157.54345865842552,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 157.54345865842552
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 67.03119854910238
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "favPizzaToppings" : "mushrooms" }, { "name" : { "$lte" : "Bob Bennet", "$gte" : "Alice Smith" } } ] } returned 130 documents.
+[jsTest] ----
+
+Histogram estimate: 138.3336768827409.
+Heuristic estimate: 67.03119854910238.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 138.3336768827409,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 138.3336768827409,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 138.3336768827409
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 67.03119854910238
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "likesPizza" : false }, { "name" : { "$lte" : "Bob Bennet" } } ] }, { "$and" : [ { "likesPizza" : true }, { "name" : { "$gte" : "Tom Watson" } } ] } ] } returned 34 documents.
+[jsTest] ----
+
+Histogram estimate: 53.66673009508964.
+Heuristic estimate: 14.800368851048507.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 53.66673009508964,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 53.66673009508964,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 53.66673009508964
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 14.800368851048507,
+ "child" : {
+ "nodeType" : "NestedLoopJoin",
+ "logicalCE" : 14.800368851048507,
+ "leftChild" : {
+ "nodeType" : "Unique",
+ "logicalCE" : 25.602073215185506,
+ "child" : {
+ "nodeType" : "Union",
+ "logicalCE" : 25.602073215185506,
+ "children" : [
+ {
+ "nodeType" : "IndexScan",
+ "logicalCE" : 17.32050807568877,
+ "interval" : "[ false, false ]"
+ },
+ {
+ "nodeType" : "IndexScan",
+ "logicalCE" : 17.32050807568877,
+ "interval" : "[ true, true ]"
+ }
+ ]
+ }
+ },
+ "rightChild" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 14.800368851048507,
+ "child" : {
+ "nodeType" : "LimitSkip",
+ "logicalCE" : 14.800368851048507,
+ "child" : {
+ "nodeType" : "Seek",
+ "logicalCE" : 14.800368851048507
+ }
+ }
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "likesPizza" : false }, { "name" : { "$lte" : "Bob Bennet" } } ] }, { "date" : { "$lte" : "1960-01-01T00:00:00" } } ] } returned 17 documents.
+[jsTest] ----
+
+Histogram estimate: 43.84062023548707.
+Heuristic estimate: 102.36131345698406.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 43.84062023548707,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 43.84062023548707,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 43.84062023548707
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 102.36131345698406,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 102.36131345698406,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 102.36131345698406
+ }
+ }
+}
+
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "likesPizza" : true } returned 62 documents.
[jsTest] ----
-Histogram estimate: 12.247448713915896.
-Heuristic estimate: 4.1617914502878195.
+Histogram estimate: 12.247448713915889.
+Heuristic estimate: 4.161791450287816.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 12.247448713915896
+ "logicalCE" : 12.247448713915889
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ true, true ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "likesPizza" : false } returned 0 documents.
[jsTest] ----
-Histogram estimate: 12.247448713915896.
-Heuristic estimate: 4.1617914502878195.
+Histogram estimate: 12.247448713915889.
+Heuristic estimate: 4.161791450287816.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 12.247448713915896,
+ "logicalCE" : 12.247448713915889,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 12.247448713915896
+ "logicalCE" : 12.247448713915889
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ false, false ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") } } returned 62 documents.
[jsTest] ----
-Histogram estimate: 17.29161646579059.
-Heuristic estimate: 7.745966692414835.
+Histogram estimate: 17.29161646579058.
+Heuristic estimate: 7.745966692414833.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 17.29161646579059,
+ "logicalCE" : 17.29161646579058,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.29161646579059,
+ "logicalCE" : 17.29161646579058,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 17.29161646579059,
+ "logicalCE" : 17.29161646579058,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 17.29161646579059
+ "logicalCE" : 17.29161646579058
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\") ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 37 documents.
[jsTest] ----
-Histogram estimate: 13.391667396064515.
-Heuristic estimate: 7.745966692414835.
+Histogram estimate: 13.39166739606451.
+Heuristic estimate: 7.745966692414833.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 13.391667396064515,
+ "logicalCE" : 13.39166739606451,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 13.391667396064515,
+ "logicalCE" : 13.39166739606451,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 13.391667396064515,
+ "logicalCE" : 13.39166739606451,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 13.391667396064515
+ "logicalCE" : 13.39166739606451
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"interval" : "[ ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\"), ISODate(\"1979-12-06T00:00:00Z\") )"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 37 documents.
[jsTest] ----
-Histogram estimate: 13.354278552090237.
-Heuristic estimate: 7.745966692414835.
+Histogram estimate: 13.354278552090232.
+Heuristic estimate: 7.745966692414833.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 13.354278552090237,
+ "logicalCE" : 13.354278552090232,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 13.354278552090237,
+ "logicalCE" : 13.354278552090232,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 13.354278552090237,
+ "logicalCE" : 13.354278552090232,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 13.354278552090237
+ "logicalCE" : 13.354278552090232
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 7.745966692414835,
+ "logicalCE" : 7.745966692414833,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 59.999999999999986,
+ "logicalCE" : 60,
"interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"1979-12-06T00:00:00Z\") )"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "name" : { "$lte" : "Bob Bennet" } } returned 13 documents.
[jsTest] ----
-Histogram estimate: 7.8740078562238045.
-Heuristic estimate: 9.949874371066203.
+Histogram estimate: 7.8740078562238.
+Heuristic estimate: 9.949874371066198.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 7.8740078562238045,
+ "logicalCE" : 7.8740078562238,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 7.8740078562238045,
+ "logicalCE" : 7.8740078562238,
"leftChild" : {
"nodeType" : "IndexScan",
"logicalCE" : 61.99999971987415,
@@ -3532,130 +3725,135 @@ Histogram explain: {
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 17.320508075688778
+ "logicalCE" : 17.32050807568877
}
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.949874371066203,
+ "logicalCE" : 9.949874371066198,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 9.949874371066203
+ "logicalCE" : 9.949874371066198
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "favPizzaToppings" : "mushrooms" } returned 20 documents.
[jsTest] ----
-Histogram estimate: 10.954451150103326.
-Heuristic estimate: 4.1617914502878195.
+Histogram estimate: 10.95445115010332.
+Heuristic estimate: 4.161791450287816.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 10.954451150103326,
+ "logicalCE" : 10.95445115010332,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 10.954451150103326,
+ "logicalCE" : 10.95445115010332,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 10.954451150103326,
+ "logicalCE" : 10.95445115010332,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 10.954451150103326
+ "logicalCE" : 10.95445115010332
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.1617914502878195,
+ "logicalCE" : 4.161791450287816,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 4.1617914502878195
+ "logicalCE" : 4.161791450287816
}
}
}
}
+
[jsTest] ----
[jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") }, "favPizzaToppings" : "mushrooms", "likesPizza" : true } returned 20 documents.
[jsTest] ----
-Histogram estimate: 9.207714944743804.
-Heuristic estimate: 1.6682798577318325.
+Histogram estimate: 9.207714944743799.
+Heuristic estimate: 1.6682798577318312.
Histogram explain: {
"nodeType" : "Root",
- "logicalCE" : 9.207714944743804,
+ "logicalCE" : 9.207714944743799,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.207714944743804,
+ "logicalCE" : 9.207714944743799,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.207714944743804,
+ "logicalCE" : 9.207714944743799,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.207714944743804,
+ "logicalCE" : 9.207714944743799,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 9.207714944743804,
+ "logicalCE" : 9.207714944743799,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 9.207714944743804
+ "logicalCE" : 9.207714944743799
}
}
}
}
}
}
+
Heuristic explain: {
"nodeType" : "Root",
- "logicalCE" : 1.6682798577318325,
+ "logicalCE" : 1.6682798577318312,
"child" : {
"nodeType" : "NestedLoopJoin",
- "logicalCE" : 1.6682798577318325,
+ "logicalCE" : 1.6682798577318312,
"leftChild" : {
"nodeType" : "IndexScan",
- "logicalCE" : 17.320508075688778,
+ "logicalCE" : 17.32050807568877,
"interval" : "[ true, true ]"
},
"rightChild" : {
"nodeType" : "Filter",
- "logicalCE" : 2.783157683713742,
+ "logicalCE" : 2.7831576837137395,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 2.783157683713742,
+ "logicalCE" : 2.7831576837137395,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 2.783157683713742,
+ "logicalCE" : 2.7831576837137395,
"child" : {
"nodeType" : "LimitSkip",
- "logicalCE" : 2.783157683713742,
+ "logicalCE" : 2.7831576837137395,
"child" : {
"nodeType" : "Seek",
- "logicalCE" : 2.783157683713742
+ "logicalCE" : 2.7831576837137395
}
}
}
@@ -3664,6 +3862,205 @@ Heuristic explain: {
}
}
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "likesPizza" : true } ] } returned 150 documents.
+[jsTest] ----
+
+Histogram estimate: 154.39449909318898.
+Heuristic estimate: 25.602073215185506.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 154.39449909318898,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 154.39449909318898,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 154.39449909318898
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 25.602073215185506,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 25.602073215185506,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 25.602073215185506
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1960-01-01T00:00:00Z") } } ] } returned 109 documents.
+[jsTest] ----
+
+Histogram estimate: 65.96349869353018.
+Heuristic estimate: 67.03119854910238.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 65.96349869353018,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 65.96349869353018,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 65.96349869353018
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 67.03119854910238,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 67.03119854910238
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "$and" : [ { "favPizzaToppings" : "mushrooms" }, { "likesPizza" : true } ] } ] } returned 126 documents.
+[jsTest] ----
+
+Histogram estimate: 89.72910201968833.
+Heuristic estimate: 11.968780936383162.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 89.72910201968833,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 89.72910201968833,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 89.72910201968833
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 11.968780936383162,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 11.968780936383162,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 11.968780936383162
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$gte" : "Kate Knight" } } ] } ] } returned 56 documents.
+[jsTest] ----
+
+Histogram estimate: 18.45591859401975.
+Heuristic estimate: 14.800368851048507.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 18.45591859401975,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 18.45591859401975,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 18.45591859401975
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 14.800368851048507,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 14.800368851048507,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 14.800368851048507
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "favPizzaToppings" : "mushrooms" } ] } returned 126 documents.
+[jsTest] ----
+
+Histogram estimate: 124.07968100899764.
+Heuristic estimate: 22.04774379816937.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 124.07968100899764,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 124.07968100899764,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 124.07968100899764
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 22.04774379816937,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 22.04774379816937,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 22.04774379816937
+ }
+ }
+}
+
+
+[jsTest] ----
+[jsTest] Query: { "$or" : [ { "$and" : [ { "favPizzaToppings" : "mushrooms" }, { "name" : { "$lte" : "John Watson" } } ] }, { "lastPizzaShopVisited" : "Zizzi" } ] } returned 101 documents.
+[jsTest] ----
+
+Histogram estimate: 99.20855534651972.
+Heuristic estimate: 22.04774379816937.
+Histogram explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 99.20855534651972,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 99.20855534651972,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 99.20855534651972
+ }
+ }
+}
+
+Heuristic explain: {
+ "nodeType" : "Root",
+ "logicalCE" : 22.04774379816937,
+ "child" : {
+ "nodeType" : "Filter",
+ "logicalCE" : 22.04774379816937,
+ "child" : {
+ "nodeType" : "PhysicalScan",
+ "logicalCE" : 22.04774379816937
+ }
+ }
+}
+
+
[jsTest] ----
[jsTest] Settings after: { "internalQueryFrameworkControl" : "forceBonsai", "ok" : 1 }
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/ce_sampled_histogram b/jstests/query_golden/expected_output/ce_sampled_histogram
index 23007b8cd7ef0..0e07244e855cb 100644
--- a/jstests/query_golden/expected_output/ce_sampled_histogram
+++ b/jstests/query_golden/expected_output/ce_sampled_histogram
@@ -1,5 +1,6 @@
setting random seed: 6345
+
[jsTest] ----
[jsTest] Settings before: internalQueryCardinalityEstimatorMode: sampling, internalQueryFrameworkControl: forceBonsai
[jsTest] ----
@@ -32,27 +33,29 @@ Loading chunk file: jstests/query_golden/libs/data/ce_data_500_5
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 122, "$lte" : 381 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 159.69375639952514,
+ "logicalCE" : 159.6937563995251,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 159.69375639952514,
+ "logicalCE" : 159.6937563995251,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 159.69375639952514,
+ "logicalCE" : 159.6937563995251,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 159.69375639952514
+ "logicalCE" : 159.6937563995251
}
}
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$gte" : 122, "$lte" : 381 } }, base = 183.77, sample = 159.69, actual = 134
[jsTest] ----
@@ -60,40 +63,43 @@ Loading chunk file: jstests/query_golden/libs/data/ce_data_500_5
Base error: { "qError" : 1.37, "relError" : 0.37, "selError" : 9.95 }
Sample error: { "qError" : 1.19, "relError" : 0.19, "selError" : 5.14 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 122 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 60.45571245186137,
+ "logicalCE" : 60.45571245186136,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 60.45571245186137,
+ "logicalCE" : 60.45571245186136,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 60.45571245186137
+ "logicalCE" : 60.45571245186136
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 122 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 56.52173913043479,
+ "logicalCE" : 56.52173913043478,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 56.52173913043479,
+ "logicalCE" : 56.52173913043478,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 56.52173913043479
+ "logicalCE" : 56.52173913043478
}
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$lt" : 122 } }, base = 60.46, sample = 56.52, actual = 60
[jsTest] ----
@@ -101,23 +107,25 @@ Sample error: { "qError" : 1.19, "relError" : 0.19, "selError" : 5.14 }
Base error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.09 }
Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 122 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 1.1052631578947203,
+ "logicalCE" : 1.105263157894737,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.1052631578947203,
+ "logicalCE" : 1.105263157894737,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.1052631578947203
+ "logicalCE" : 1.105263157894737
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 122 } }
[jsTest] ----
@@ -135,6 +143,7 @@ Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 }
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$eq" : 122 } }, base = 1.11, sample = 0, actual = 1
[jsTest] ----
@@ -142,6 +151,7 @@ Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 }
Base error: { "qError" : 1.11, "relError" : 0.11, "selError" : 0.02 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } }
[jsTest] ----
@@ -163,6 +173,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } }
[jsTest] ----
@@ -184,6 +195,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } }, base = 295.64, sample = 315.74, actual = 277
[jsTest] ----
@@ -191,40 +203,43 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Base error: { "qError" : 1.07, "relError" : 0.07, "selError" : 3.73 }
Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 7.75 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 381 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 194.74074074074076,
+ "logicalCE" : 194.74074074074073,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 194.74074074074076,
+ "logicalCE" : 194.74074074074073,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 194.74074074074076
+ "logicalCE" : 194.74074074074073
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 381 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 169.56521739130437,
+ "logicalCE" : 169.56521739130434,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 169.56521739130437,
+ "logicalCE" : 169.56521739130434,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 169.56521739130437
+ "logicalCE" : 169.56521739130434
}
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$lt" : 381 } }, base = 194.74, sample = 169.57, actual = 193
[jsTest] ----
@@ -232,23 +247,25 @@ Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 7.75 }
Base error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.35 }
Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 381 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 1.2592592592592378,
+ "logicalCE" : 1.2592592592592593,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.2592592592592378,
+ "logicalCE" : 1.2592592592592593,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.2592592592592378
+ "logicalCE" : 1.2592592592592593
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 381 } }
[jsTest] ----
@@ -266,6 +283,7 @@ Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 }
}
}
+
[jsTest] ----
[jsTest] CE: { "uniform_int_0-1000-1" : { "$eq" : 381 } }, base = 1.26, sample = 0, actual = 1
[jsTest] ----
@@ -273,6 +291,7 @@ Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 }
Base error: { "qError" : 1.26, "relError" : 0.26, "selError" : 0.05 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } }
[jsTest] ----
@@ -294,6 +313,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } }
[jsTest] ----
@@ -315,6 +335,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } }, base = 259.28, sample = 240.97, actual = 173
[jsTest] ----
@@ -322,6 +343,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Base error: { "qError" : 1.5, "relError" : 0.5, "selError" : 17.26 }
Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 469 } }
[jsTest] ----
@@ -339,23 +361,25 @@ Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 469 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 226.08695652173915,
+ "logicalCE" : 226.08695652173913,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 226.08695652173915,
+ "logicalCE" : 226.08695652173913,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 226.08695652173915
+ "logicalCE" : 226.08695652173913
}
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$lt" : 469 } }, base = 199, sample = 226.09, actual = 199
[jsTest] ----
@@ -363,40 +387,43 @@ Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 5.42 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 469 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 3.0000000000000027,
+ "logicalCE" : 3,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 3.0000000000000027,
+ "logicalCE" : 3,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 3.0000000000000027
+ "logicalCE" : 3
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 469 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 4.347826086956497,
+ "logicalCE" : 4.3478260869565215,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.347826086956497,
+ "logicalCE" : 4.3478260869565215,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 4.347826086956497
+ "logicalCE" : 4.3478260869565215
}
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$eq" : 469 } }, base = 3, sample = 4.35, actual = 3
[jsTest] ----
@@ -404,6 +431,7 @@ Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 5.42 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } }
[jsTest] ----
@@ -425,6 +453,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } }
[jsTest] ----
@@ -446,6 +475,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } }, base = 162.36, sample = 181.06, actual = 25
[jsTest] ----
@@ -453,6 +483,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
Base error: { "qError" : 6.49, "relError" : 5.49, "selError" : 27.47 }
Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 449 } }
[jsTest] ----
@@ -470,6 +501,7 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 449 } }
[jsTest] ----
@@ -487,6 +519,7 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 }
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$lt" : 449 } }, base = 177, sample = 191.3, actual = 177
[jsTest] ----
@@ -494,40 +527,43 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.08, "relError" : 0.08, "selError" : 2.86 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 449 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 3.0000000000000027,
+ "logicalCE" : 3,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 3.0000000000000027,
+ "logicalCE" : 3,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 3.0000000000000027
+ "logicalCE" : 3
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 449 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 4.347826086956497,
+ "logicalCE" : 4.3478260869565215,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.347826086956497,
+ "logicalCE" : 4.3478260869565215,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 4.347826086956497
+ "logicalCE" : 4.3478260869565215
}
}
}
+
[jsTest] ----
[jsTest] CE: { "normal_int_0-1000-1" : { "$eq" : 449 } }, base = 3, sample = 4.35, actual = 3
[jsTest] ----
@@ -535,27 +571,29 @@ Sample error: { "qError" : 1.08, "relError" : 0.08, "selError" : 2.86 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 137.8193600333422,
+ "logicalCE" : 137.81936003334218,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 137.8193600333422,
+ "logicalCE" : 137.81936003334218,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 137.8193600333422,
+ "logicalCE" : 137.81936003334218,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 137.8193600333422
+ "logicalCE" : 137.81936003334218
}
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } }
[jsTest] ----
@@ -577,6 +615,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } }, base = 137.82, sample = 142.59, actual = 102
[jsTest] ----
@@ -584,6 +623,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 }
Base error: { "qError" : 1.35, "relError" : 0.35, "selError" : 7.16 }
Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } }
[jsTest] ----
@@ -601,6 +641,7 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } }
[jsTest] ----
@@ -618,6 +659,7 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 }
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$lt" : 408 } }, base = 356, sample = 352.17, actual = 356
[jsTest] ----
@@ -625,23 +667,25 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 4.0000000000000036,
+ "logicalCE" : 4,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.0000000000000036,
+ "logicalCE" : 4,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 4.0000000000000036
+ "logicalCE" : 4
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } }
[jsTest] ----
@@ -659,6 +703,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$eq" : 408 } }, base = 4, sample = 0, actual = 4
[jsTest] ----
@@ -666,48 +711,51 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 137.21620895506481,
+ "logicalCE" : 137.2162089550648,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 137.21620895506481,
+ "logicalCE" : 137.2162089550648,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 137.21620895506481,
+ "logicalCE" : 137.2162089550648,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 137.21620895506481
+ "logicalCE" : 137.2162089550648
}
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 141.25271704696053,
+ "logicalCE" : 141.2527170469605,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 141.25271704696053,
+ "logicalCE" : 141.2527170469605,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 141.25271704696053,
+ "logicalCE" : 141.2527170469605,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 141.25271704696053
+ "logicalCE" : 141.2527170469605
}
}
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } }, base = 137.22, sample = 141.25, actual = 98
[jsTest] ----
@@ -715,6 +763,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
Base error: { "qError" : 1.4, "relError" : 0.4, "selError" : 7.84 }
Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } }
[jsTest] ----
@@ -732,6 +781,7 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } }
[jsTest] ----
@@ -749,6 +799,7 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 }
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$lt" : 408 } }, base = 356, sample = 352.17, actual = 356
[jsTest] ----
@@ -756,23 +807,25 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } }
[jsTest] ----
{
"nodeType" : "Root",
- "logicalCE" : 4.0000000000000036,
+ "logicalCE" : 4,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 4.0000000000000036,
+ "logicalCE" : 4,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 4.0000000000000036
+ "logicalCE" : 4
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } }
[jsTest] ----
@@ -790,6 +843,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
}
}
+
[jsTest] ----
[jsTest] CE: { "chi2_int_0-1000-1" : { "$eq" : 408 } }, base = 4, sample = 0, actual = 4
[jsTest] ----
@@ -797,6 +851,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 }
Base error: { "qError" : 1, "relError" : 0, "selError" : 0 }
Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -808,21 +863,22 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
{
"nodeType" : "Root",
- "logicalCE" : 196.05850926700424,
+ "logicalCE" : 196.05850926700427,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 196.05850926700424,
+ "logicalCE" : 196.05850926700427,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 196.05850926700424,
+ "logicalCE" : 196.05850926700427,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 196.05850926700424
+ "logicalCE" : 196.05850926700427
}
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -849,6 +905,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -861,6 +918,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 }
Base error: { "qError" : 1.57, "relError" : 0.57, "selError" : 14.21 }
Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -871,17 +929,18 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 }
{
"nodeType" : "Root",
- "logicalCE" : 89.84799999999998,
+ "logicalCE" : 89.848,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 89.84799999999998,
+ "logicalCE" : 89.848,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 89.84799999999998
+ "logicalCE" : 89.848
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -903,6 +962,7 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -914,6 +974,7 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 }
Base error: { "qError" : 1.02, "relError" : -0.02, "selError" : -0.43 }
Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -924,17 +985,18 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
{
"nodeType" : "Root",
- "logicalCE" : 1.1999999999999789,
+ "logicalCE" : 1.2,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.1999999999999789,
+ "logicalCE" : 1.2,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.1999999999999789
+ "logicalCE" : 1.2
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -956,6 +1018,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -967,6 +1030,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
Base error: { "qError" : 1.2, "relError" : 0.2, "selError" : 0.04 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -978,21 +1042,22 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
{
"nodeType" : "Root",
- "logicalCE" : 141.03563731142566,
+ "logicalCE" : 141.03563731142563,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 141.03563731142566,
+ "logicalCE" : 141.03563731142563,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 141.03563731142566,
+ "logicalCE" : 141.03563731142563,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 141.03563731142566
+ "logicalCE" : 141.03563731142563
}
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1019,6 +1084,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1031,6 +1097,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Base error: { "qError" : 2.17, "relError" : 1.17, "selError" : 15.21 }
Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1041,17 +1108,18 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 }
{
"nodeType" : "Root",
- "logicalCE" : 89.84799999999998,
+ "logicalCE" : 89.848,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 89.84799999999998,
+ "logicalCE" : 89.848,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 89.84799999999998
+ "logicalCE" : 89.848
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1073,6 +1141,7 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1084,6 +1153,7 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 }
Base error: { "qError" : 1.02, "relError" : -0.02, "selError" : -0.43 }
Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1094,17 +1164,18 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
{
"nodeType" : "Root",
- "logicalCE" : 1.1999999999999789,
+ "logicalCE" : 1.2,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.1999999999999789,
+ "logicalCE" : 1.2,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.1999999999999789
+ "logicalCE" : 1.2
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1126,6 +1197,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : {
@@ -1137,6 +1209,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 }
Base error: { "qError" : 1.2, "relError" : 0.2, "selError" : 0.04 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1163,6 +1236,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1189,6 +1263,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1201,6 +1276,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Base error: { "qError" : 1.48, "relError" : 0.48, "selError" : 21.38 }
Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1211,17 +1287,18 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 }
{
"nodeType" : "Root",
- "logicalCE" : 122.37898193760266,
+ "logicalCE" : 122.37898193760263,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 122.37898193760266,
+ "logicalCE" : 122.37898193760263,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 122.37898193760266
+ "logicalCE" : 122.37898193760263
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1232,17 +1309,18 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 }
{
"nodeType" : "Root",
- "logicalCE" : 169.56521739130437,
+ "logicalCE" : 169.56521739130434,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 169.56521739130437,
+ "logicalCE" : 169.56521739130434,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 169.56521739130437
+ "logicalCE" : 169.56521739130434
}
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1254,6 +1332,7 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 }
Base error: { "qError" : 1.31, "relError" : -0.24, "selError" : -7.52 }
Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1264,17 +1343,18 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 }
{
"nodeType" : "Root",
- "logicalCE" : 1.1448275862068757,
+ "logicalCE" : 1.1448275862068966,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.1448275862068757,
+ "logicalCE" : 1.1448275862068966,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.1448275862068757
+ "logicalCE" : 1.1448275862068966
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1296,6 +1376,7 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1307,6 +1388,7 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 }
Base error: { "qError" : 1.14, "relError" : 0.14, "selError" : 0.03 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1333,6 +1415,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1344,21 +1427,22 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
{
"nodeType" : "Root",
- "logicalCE" : 156.5311906409129,
+ "logicalCE" : 156.53119064091288,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 156.5311906409129,
+ "logicalCE" : 156.53119064091288,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 156.5311906409129,
+ "logicalCE" : 156.53119064091288,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 156.5311906409129
+ "logicalCE" : 156.53119064091288
}
}
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1371,6 +1455,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Base error: { "qError" : 1.3, "relError" : 0.29, "selError" : 5.19 }
Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1381,17 +1466,18 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 }
{
"nodeType" : "Root",
- "logicalCE" : 72.42088669950742,
+ "logicalCE" : 72.42088669950739,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 72.42088669950742,
+ "logicalCE" : 72.42088669950739,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 72.42088669950742
+ "logicalCE" : 72.42088669950739
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1402,17 +1488,18 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 }
{
"nodeType" : "Root",
- "logicalCE" : 73.91304347826089,
+ "logicalCE" : 73.91304347826087,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 73.91304347826089,
+ "logicalCE" : 73.91304347826087,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 73.91304347826089
+ "logicalCE" : 73.91304347826087
}
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1424,6 +1511,7 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 }
Base error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.12 }
Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 }
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1434,17 +1522,18 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 }
{
"nodeType" : "Root",
- "logicalCE" : 1.1448275862068757,
+ "logicalCE" : 1.1448275862068966,
"child" : {
"nodeType" : "Filter",
- "logicalCE" : 1.1448275862068757,
+ "logicalCE" : 1.1448275862068966,
"child" : {
"nodeType" : "PhysicalScan",
- "logicalCE" : 1.1448275862068757
+ "logicalCE" : 1.1448275862068966
}
}
}
+
[jsTest] ----
[jsTest] Query: ce_data_500 {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1466,6 +1555,7 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 }
}
}
+
[jsTest] ----
[jsTest] CE: {
[jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : {
@@ -1477,6 +1567,7 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 }
Base error: { "qError" : 1.14, "relError" : 0.14, "selError" : 0.03 }
Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
+
[jsTest] ----
[jsTest] Average errors (30 queries):
[jsTest] ----
@@ -1484,6 +1575,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 }
Average base error: { "absError" : NaN, "relError" : 0.35, "selError" : 4.05 }
Average sample error: { "absError" : NaN, "relError" : 0.12, "selError" : 4.36 }
+
[jsTest] ----
[jsTest] Settings after: { "internalQueryFrameworkControl" : "forceBonsai", "ok" : 1 }
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/eq b/jstests/query_golden/expected_output/eq
index 60f017eb31c0b..b8be01b6ff0c1 100644
--- a/jstests/query_golden/expected_output/eq
+++ b/jstests/query_golden/expected_output/eq
@@ -225,6 +225,7 @@
{ "a" : { "$minKey" : 1 } }
Collection count: 213
+
[jsTest] ----
[jsTest] Query: { "find" : "query_golden_eq", "filter" : { "a" : { "$eq" : { "$minKey" : 1 } } }, "projection" : { "_id" : 0 } }
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/exclusion_projection b/jstests/query_golden/expected_output/exclusion_projection
index 25fff0934575f..ecaaf5c13aee5 100644
--- a/jstests/query_golden/expected_output/exclusion_projection
+++ b/jstests/query_golden/expected_output/exclusion_projection
@@ -59,6 +59,7 @@
{ "_id" : 9, "a" : { "d" : 1 } }
Collection count: 53
+
[jsTest] ----
[jsTest] Query: [ { "$project" : { "a" : 0 } } ]
[jsTest] ----
@@ -603,6 +604,7 @@ Collection count: 53
{ "_id" : { "x" : 1 }, "y" : 2 }
Collection count: 7
+
[jsTest] ----
[jsTest] Query: [ { "$project" : { "_id" : 0 } } ]
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/extraneous_project b/jstests/query_golden/expected_output/extraneous_project
index bf06bad74e956..971bea9604f32 100644
--- a/jstests/query_golden/expected_output/extraneous_project
+++ b/jstests/query_golden/expected_output/extraneous_project
@@ -6,7 +6,8 @@
nReturned: 0
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -31,13 +32,15 @@ Plan skeleton: {
}
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "username" : "/^user8/" } }, { "$group" : { "_id" : 1, "count" : { "$sum" : 1 } } } ]
[jsTest] ----
nReturned: 0
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -60,4 +63,4 @@ Plan skeleton: {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/jstests/query_golden/expected_output/inclusion_projection b/jstests/query_golden/expected_output/inclusion_projection
index 81e63e80ac1f3..18e049074041d 100644
--- a/jstests/query_golden/expected_output/inclusion_projection
+++ b/jstests/query_golden/expected_output/inclusion_projection
@@ -59,6 +59,7 @@
{ "_id" : 9, "a" : { "d" : 1 } }
Collection count: 53
+
[jsTest] ----
[jsTest] Creating indexes:
[jsTest] ----
@@ -791,6 +792,7 @@ Collection count: 53
{ "_id" : { "x" : 1 }, "y" : 2 }
Collection count: 7
+
[jsTest] ----
[jsTest] Creating indexes:
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/match_with_and_or b/jstests/query_golden/expected_output/match_with_and_or
index 4cf3b70cc0856..a9e8088fa6083 100644
--- a/jstests/query_golden/expected_output/match_with_and_or
+++ b/jstests/query_golden/expected_output/match_with_and_or
@@ -23,6 +23,7 @@
{ "_id" : 9, "a" : [ 1, 2, { "b" : 1 }, { "b" : 2 } ], "x" : 1 }
Collection count: 17
+
[jsTest] ----
[jsTest] Creating indexes:
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/match_with_exists b/jstests/query_golden/expected_output/match_with_exists
index d2ed49307fb03..1658eba131092 100644
--- a/jstests/query_golden/expected_output/match_with_exists
+++ b/jstests/query_golden/expected_output/match_with_exists
@@ -13,6 +13,7 @@
{ "_id" : 6, "a" : [ { "b" : 4 } ] }
Collection count: 7
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ]
[jsTest] ----
@@ -76,6 +77,7 @@ Collection count: 7
{ "_id" : 1, "a" : [ ] }
Collection count: 1
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ]
[jsTest] ----
@@ -97,6 +99,7 @@ Collection count: 1
{ "_id" : 1, "a" : false }
Collection count: 1
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ]
[jsTest] ----
@@ -118,6 +121,7 @@ Collection count: 1
{ "_id" : 1, "a" : [ { "b" : 2 }, { "a" : 1 } ] }
Collection count: 1
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a.a" : { "$exists" : true } } } ]
[jsTest] ----
@@ -146,6 +150,7 @@ Collection count: 1
{ "_id" : 1, "a" : [ [ { "b" : 1 } ] ] }
Collection count: 1
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a.b" : { "$exists" : false } } } ]
[jsTest] ----
@@ -168,6 +173,7 @@ Collection count: 1
{ "_id" : 2, "a" : [ 2 ] }
Collection count: 2
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$elemMatch" : { "$exists" : true } } } } ]
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/match_with_in b/jstests/query_golden/expected_output/match_with_in
index 82233b6f8e81d..8e10f48d3ad1c 100644
--- a/jstests/query_golden/expected_output/match_with_in
+++ b/jstests/query_golden/expected_output/match_with_in
@@ -20,6 +20,7 @@
{ "_id" : 9, "a" : { "c" : 1 } }
Collection count: 14
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "a" : { "$in" : [ null ] } } } ]
[jsTest] ----
diff --git a/jstests/query_golden/expected_output/non_multikey_paths b/jstests/query_golden/expected_output/non_multikey_paths
index 2d6fa83adb830..a4c99aea847bb 100644
--- a/jstests/query_golden/expected_output/non_multikey_paths
+++ b/jstests/query_golden/expected_output/non_multikey_paths
@@ -4,48 +4,57 @@
[jsTest] Query: [ { "$match" : { "one.one.one.one" : 2 } } ]
[jsTest] ----
-Leaf stage: {
+Leaf stage:
+{
"nodeType" : "IndexScan",
"indexDefName" : "one.one.one.one_1",
"interval" : "[ 2, 2 ]"
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "one.one.one.many" : 2 } } ]
[jsTest] ----
-Leaf stage: {
+Leaf stage:
+{
"nodeType" : "IndexScan",
"indexDefName" : "one.one.one.many_1",
"interval" : "[ 2, 2 ]"
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.one.one.one" : 2 } } ]
[jsTest] ----
-Leaf stage: {
+Leaf stage:
+{
"nodeType" : "IndexScan",
"indexDefName" : "many.one.one.one_1",
"interval" : "[ 2, 2 ]"
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.one.one.many" : 2 } } ]
[jsTest] ----
-Leaf stage: {
+Leaf stage:
+{
"nodeType" : "IndexScan",
"indexDefName" : "many.one.one.many_1",
"interval" : "[ 2, 2 ]"
}
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.many.many.many" : 2 } } ]
[jsTest] ----
-Leaf stage: {
+Leaf stage:
+{
"nodeType" : "IndexScan",
"indexDefName" : "many.many.many.many_1",
"interval" : "[ 2, 2 ]"
-}
\ No newline at end of file
+}
diff --git a/jstests/query_golden/expected_output/not_pushdown b/jstests/query_golden/expected_output/not_pushdown
index 10b9235860c40..bf222aeec88f4 100644
--- a/jstests/query_golden/expected_output/not_pushdown
+++ b/jstests/query_golden/expected_output/not_pushdown
@@ -5,46 +5,59 @@
[jsTest] note: Should be optimized to Neq
[jsTest] ----
-Operators used: [ "Neq" ]
+Operators used:
+[ "Neq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "one.one.one.many" : { "$ne" : 7 } } } ]
[jsTest] note: Should stay as Not Traverse Eq
[jsTest] ----
-Operators used: [ "Not", "Eq" ]
+Operators used:
+[ "Not", "Eq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.one.one.one" : { "$ne" : 7 } } } ]
[jsTest] note: Should stay as Not Traverse Eq
[jsTest] ----
-Operators used: [ "Not", "Eq" ]
+Operators used:
+[ "Not", "Eq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.one.one.many" : { "$ne" : 7 } } } ]
[jsTest] note: Should stay as Not Traverse Eq
[jsTest] ----
-Operators used: [ "Not", "Eq" ]
+Operators used:
+[ "Not", "Eq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.many.many.many" : { "$ne" : 7 } } } ]
[jsTest] note: Should stay as Not Traverse Eq
[jsTest] ----
-Operators used: [ "Not", "Eq" ]
+Operators used:
+[ "Not", "Eq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many" : { "$elemMatch" : { "one.one.one" : { "$ne" : 7 } } } } } ]
[jsTest] note: Should be optimized to Neq
[jsTest] ----
-Operators used: [ "Neq" ]
+Operators used:
+[ "Neq" ]
+
[jsTest] ----
[jsTest] Query: [ { "$match" : { "many.one" : { "$elemMatch" : { "one.one" : { "$ne" : 7 } } } } } ]
[jsTest] note: Should be optimized to Neq
[jsTest] ----
-Operators used: [ "Neq" ]
\ No newline at end of file
+Operators used:
+[ "Neq" ]
diff --git a/jstests/query_golden/expected_output/null_missing b/jstests/query_golden/expected_output/null_missing
index 6f0598ae154e5..c6538de172adf 100644
--- a/jstests/query_golden/expected_output/null_missing
+++ b/jstests/query_golden/expected_output/null_missing
@@ -5,7 +5,8 @@
[jsTest] ----
nReturned: 3
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -21,12 +22,14 @@ Plan skeleton: {
}
}
+
[jsTest] ----
[jsTest] Index on { "a.b" : 1 }. Query: [ { "$match" : { "a.b" : null } } ]
[jsTest] ----
nReturned: 3
-Plan skeleton: {
+Plan skeleton:
+{
"queryPlanner" : {
"winningPlan" : {
"optimizerPlan" : {
@@ -46,4 +49,4 @@ Plan skeleton: {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/jstests/query_golden/extraneous_project.js b/jstests/query_golden/extraneous_project.js
index 69aa4c6048815..1ae3a65ab3e24 100644
--- a/jstests/query_golden/extraneous_project.js
+++ b/jstests/query_golden/extraneous_project.js
@@ -6,10 +6,8 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton.
+import {show} from "jstests/libs/golden_test.js";
+import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js";
db.setLogLevel(4, "query");
@@ -39,4 +37,3 @@ run([
]);
run([{$match: {username: "/^user8/"}}, {$group: {_id: 1, count: {$sum: 1}}}]);
-})();
diff --git a/jstests/query_golden/inclusion_projection.js b/jstests/query_golden/inclusion_projection.js
index 295785e82fb6d..abee73f3568de 100644
--- a/jstests/query_golden/inclusion_projection.js
+++ b/jstests/query_golden/inclusion_projection.js
@@ -3,10 +3,11 @@
* jstests/cqf/projection.js; both tests will exist pending a decision about the future of golden
* jstesting for CQF.
*/
-
-(function() {
-"use strict";
-load("jstests/query_golden/libs/projection_helpers.js");
+import {
+ getIdProjectionDocs,
+ getProjectionDocs,
+ runProjectionsAgainstColl
+} from "jstests/query_golden/libs/projection_helpers.js";
const coll = db.cqf_inclusion_project;
@@ -42,4 +43,3 @@ const idInclusionProjSpecs = [
];
const idIndexes = [{"_id.a": 1}, {"_id.a": 1, "_id.b": 1}, {"_id.a.b": 1}];
runProjectionsAgainstColl(coll, getIdProjectionDocs(), idIndexes, idInclusionProjSpecs);
-}());
diff --git a/jstests/query_golden/libs/ce_data.js b/jstests/query_golden/libs/ce_data.js
index eb899d047e9b4..c9b262933b697 100644
--- a/jstests/query_golden/libs/ce_data.js
+++ b/jstests/query_golden/libs/ce_data.js
@@ -1,10 +1,11 @@
// Small data generator for the purpose of developing the test framework.
-const alphabet = "abcdefghijklmnopqrstuvwxyz";
-const len = alphabet.length;
+export const alphabet = "abcdefghijklmnopqrstuvwxyz";
+
+export const len = alphabet.length;
// Returns pseudo-random string where the symbols and the length are functions of the parameter n.
-function genRandomString(n) {
+export function genRandomString(n) {
let strLen = n % 4 + 1;
let str = "";
let i = 0;
@@ -15,11 +16,11 @@ function genRandomString(n) {
return str;
}
-const seedArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 19, 20];
-const arrLen = seedArray.length;
+export const seedArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 19, 20];
+export const arrLen = seedArray.length;
// Returns pseudo-random array where the elements and the length are functions of the parameter n.
-function genRandomArray(n) {
+export function genRandomArray(n) {
let aLen = (7 * n) % 5 + 1;
let start = (13 * n) % arrLen;
return seedArray.slice(start, start + aLen);
@@ -29,14 +30,14 @@ function genRandomArray(n) {
* Returns documents for cardinality estimation tests.
*/
-function getCEDocs() {
+export function getCEDocs() {
return Array.from(
{length: 10},
(_, i) =>
({_id: i, a: i + 10, b: genRandomString(i), c_int: genRandomArray(i), mixed: i * 11}));
}
-function getCEDocs1() {
+export function getCEDocs1() {
return Array.from({length: 10}, (_, i) => ({
_id: i + 10,
a: i + 25,
diff --git a/jstests/query_golden/libs/compute_errors.js b/jstests/query_golden/libs/compute_errors.js
index 7e4bcf966d43b..1c06853eb571a 100644
--- a/jstests/query_golden/libs/compute_errors.js
+++ b/jstests/query_golden/libs/compute_errors.js
@@ -1,10 +1,12 @@
+import {round2} from "jstests/libs/optimizer_utils.js";
+
/**
* Compute cardinality estimation errors for a testcase and CE strategy.
* Example testcase:
* { _id: 2, pipeline: [...], nReturned: 2, "heuristic": 4.47, "histogram": 2, ...}
* Returns : {"qError": 2.23, "relError": 1.23, "selError": 12.35}
*/
-function computeStrategyErrors(testcase, strategy, collSize) {
+export function computeStrategyErrors(testcase, strategy, collSize) {
const absError = testcase[strategy] - testcase.nReturned;
let relError = 0.0;
if (testcase.nReturned > 0) {
@@ -26,7 +28,7 @@ function computeStrategyErrors(testcase, strategy, collSize) {
/**
* Compute cardinality estimation errors for a testcase for all CE strategies.
*/
-function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) {
+export function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) {
let errorDoc = {_id: testcase._id, qtype: testcase.qtype};
if (isComplex == true) {
errorDoc["numberOfTerms"] = testcase.numberOfTerms;
@@ -46,7 +48,7 @@ function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) {
print(`${strategy}: ${testcase[strategy]} `);
print(`QError: ${errors["qError"]}, RelError: ${errors["relError"]}, SelError: ${
errors["selError"]}%\n`);
- duration = 'duration_' + strategy;
+ const duration = 'duration_' + strategy;
errorDoc[duration] = testcase[duration];
});
return errorDoc;
@@ -55,7 +57,7 @@ function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) {
/**
* Compute CE errors for each query and populate the error collection 'errorColl'.
*/
-function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, isComplex) {
+export function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, isComplex) {
for (const testcase of testCases) {
jsTestLog(`Query ${testcase._id}: ${tojsononeline(testcase.pipeline)}`);
print(`Actual cardinality: ${testcase.nReturned}\n`);
@@ -69,7 +71,7 @@ function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, i
* Given an array of fields on which we want to perform $group, return an expression computing the
* group key.
*/
-function makeGroupKey(groupFields) {
+export function makeGroupKey(groupFields) {
let args = [];
for (let i = 0; i < groupFields.length; i++) {
args.push({$toString: "$" + groupFields[i]});
@@ -83,7 +85,7 @@ function makeGroupKey(groupFields) {
/**
* Aggregate errors in the 'errorColl' on the 'groupFields' for each CE strategy.
*/
-function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) {
+export function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) {
const groupKey = makeGroupKey(groupFields);
jsTestLog(`Mean errors per ${tojsononeline(groupFields)}:`);
for (const strategy of ceStrategies) {
@@ -136,7 +138,7 @@ function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) {
* Aggregate errors in the 'errorColl' per CE strategy. If a predicate is provided
* aggregate only the error documents which satisfy the predicate.
*/
-function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) {
+export function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) {
const msg = (Object.keys(predicate).length == 0) ? "all queries"
: `predicate ${tojsononeline(predicate)}:`;
jsTestLog(`Mean errors per strategy for ${msg}:`);
@@ -184,7 +186,7 @@ function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) {
}
}
-function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) {
+export function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) {
print("Average optimization time per strategy:");
for (const strategy of ceStrategies) {
const strategyDuration = "$" +
@@ -214,7 +216,8 @@ function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) {
/**
 * Find top 10 inaccurate estimates for a strategy and an error field.
*/
-function printQueriesWithBadAccuracy(errorColl, testCases, strategy, errorField, count = 10) {
+export function printQueriesWithBadAccuracy(
+ errorColl, testCases, strategy, errorField, count = 10) {
const errorFieldName = strategy + "." + errorField;
const res = errorColl
.aggregate([
@@ -230,7 +233,8 @@ function printQueriesWithBadAccuracy(errorColl, testCases, strategy, errorField,
for (const doc of res) {
const i = doc["_id"];
const test = testCases[i];
- print(`Id: ${test._id}: ${tojsononeline(test.pipeline)}, qtype: ${test.qtype}, data type: ${test.dtype},
-cardinality: ${test.nReturned}, ${strategy} estimation: ${test[strategy]}, errors: ${tojsononeline(doc[strategy])}\n`);
+ print(`Id: ${test._id}: ${tojsononeline(test.pipeline)}, qtype: ${test.qtype}, data type: ${
+ test.dtype}, \ncardinality: ${test.nReturned}, ${strategy} estimation: ${
+ test[strategy]}, errors: ${tojsononeline(doc[strategy])}\n`);
}
}
diff --git a/jstests/query_golden/libs/data/ce_accuracy_test.data b/jstests/query_golden/libs/data/ce_accuracy_test.data
index 5698824c8ee4c..3f2c1bb3a69e0 100644
--- a/jstests/query_golden/libs/data/ce_accuracy_test.data
+++ b/jstests/query_golden/libs/data/ce_accuracy_test.data
@@ -1,2 +1,2 @@
// This is a generated file.
-const chunkNames = ['ce_data_500_1','ce_data_500_2','ce_data_500_3','ce_data_500_4','ce_data_500_5'];
\ No newline at end of file
+const chunkNames = ['ce_data_500_1','ce_data_500_2','ce_data_500_3','ce_data_500_4','ce_data_500_5'];
diff --git a/jstests/query_golden/libs/data/ce_accuracy_test.schema b/jstests/query_golden/libs/data/ce_accuracy_test.schema
index 40cef37674879..60513d5c65875 100644
--- a/jstests/query_golden/libs/data/ce_accuracy_test.schema
+++ b/jstests/query_golden/libs/data/ce_accuracy_test.schema
@@ -457,4 +457,4 @@ const dbMetadata = [
"compound_indexes": [],
"cardinality": 500
}
-];
\ No newline at end of file
+];
diff --git a/jstests/query_golden/libs/example_data.js b/jstests/query_golden/libs/example_data.js
index 06629907cbb65..027f492fbd82e 100644
--- a/jstests/query_golden/libs/example_data.js
+++ b/jstests/query_golden/libs/example_data.js
@@ -3,7 +3,7 @@
// Generates interesting "leaf" values: values that don't contain other values.
// This includes [] and {}.
-function leafs() {
+export function leafs() {
// See bsontypes.h or https://bsonspec.org/ for a complete list of BSON types.
// Not every type is represented here.
return [
@@ -120,16 +120,16 @@ function leafs() {
// Documents with (at most) a single field with the given name.
// Includes the "missing value" by including one empty doc.
-function unaryDocs(fieldname, values) {
+export function unaryDocs(fieldname, values) {
return values.map(v => ({[fieldname]: v}));
}
// Arrays with exactly one element.
-function unaryArrays(values) {
+export function unaryArrays(values) {
return values.map(v => [v]);
}
-function smallDocs() {
+export function smallDocs() {
let values = leafs();
values = values.concat(unaryDocs('x', values)).concat(unaryArrays(values));
return unaryDocs('a', values);
@@ -137,7 +137,7 @@ function smallDocs() {
// Prepend an '_id' field to each document, numbered sequentially from 0.
// Preserves any existing '_id' value, but always moves that field to the beginning.
-function sequentialIds(docs) {
+export function sequentialIds(docs) {
let i = 0;
return docs.map(d => Object.merge({_id: i++}, d));
}
diff --git a/jstests/query_golden/libs/generate_queries.js b/jstests/query_golden/libs/generate_queries.js
index 334bb23f0671e..66358b3da687c 100644
--- a/jstests/query_golden/libs/generate_queries.js
+++ b/jstests/query_golden/libs/generate_queries.js
@@ -1,11 +1,11 @@
/**
 * Helper functions for generating queries over a collection.
*/
-function makeMatchPredicate(field, boundary, compOp) {
+export function makeMatchPredicate(field, boundary, compOp) {
return {"$match": {[field]: {[compOp]: boundary}}};
}
-function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false) {
+export function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false) {
if (isElemMatch) {
return {"$match": {[field]: {"$elemMatch": {[op1]: bound1, [op2]: bound2}}}};
}
@@ -18,7 +18,7 @@ function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false
 * explosion in the number of predicates, we create all comparison predicates only for 25% of the
* query values, while for the other 75% we pick one comparison operator in a round-robin fashion.
*/
-function generateComparisons(field, boundaries, fieldType) {
+export function generateComparisons(field, boundaries, fieldType) {
let predicates = [];
const compOps = ["$eq", "$lt", "$lte", "$gt", "$gte"];
// Index over boundaries.
@@ -56,10 +56,10 @@ function generateComparisons(field, boundaries, fieldType) {
return docs;
}
-const min_char_code = '0'.codePointAt(0);
-const max_char_code = '~'.codePointAt(0);
+export const min_char_code = '0'.codePointAt(0);
+export const max_char_code = '~'.codePointAt(0);
-function nextChar(thisChar, distance) {
+export function nextChar(thisChar, distance) {
const number_of_chars = max_char_code - min_char_code + 1;
const char_code = thisChar.codePointAt(0);
assert(min_char_code <= char_code <= max_char_code, "char is out of range");
@@ -73,7 +73,7 @@ function nextChar(thisChar, distance) {
* Produces a string value at some distance from the argument string.
 * distance: "small", "medium", "large".
*/
-function nextStr(str, distance) {
+export function nextStr(str, distance) {
var res = 'nextStrUndefined';
const spec = {"small": 3, "medium": 2, "large": 1};
if (str.length == 0) {
@@ -90,8 +90,8 @@ function nextStr(str, distance) {
let newStr0 = str.slice(0, pos);
let nextCh = nextChar(str[pos], 4 - spec[distance] /*char distance*/);
- newStr1 = newStr0 + nextCh;
- newStr = newStr1 + str.slice(pos + 1, str.length);
+ const newStr1 = newStr0 + nextCh;
+ const newStr = newStr1 + str.slice(pos + 1, str.length);
assert(newStr.indexOf("NaN") == -1,
`Found NaN with inputs: newStr=${newStr}, str=${str}, distance=${distance}; pos=${
pos}, nextCh=${nextCh}, newStr0=${newStr0}, newStr1=${newStr1}`);
@@ -110,7 +110,7 @@ function nextStr(str, distance) {
 * types both lower and upper bounds are taken from the 'values' array and rangeSize is the distance
* they are apart from each other.
*/
-function generateRanges(values, fieldType, rangeSize) {
+export function generateRanges(values, fieldType, rangeSize) {
let ranges = [];
if (fieldType == 'integer' || fieldType == 'double') {
for (const val of values) {
@@ -118,7 +118,7 @@ function generateRanges(values, fieldType, rangeSize) {
}
} else if (fieldType == 'string') {
for (const val of values) {
- nanPos = val.indexOf("NaN");
+ const nanPos = val.indexOf("NaN");
assert(nanPos == -1, `Found NaN in values: ${values}, ${val}, ${nanPos}`);
var nextVar = nextStr(val, rangeSize);
assert(nextVar != 'nextStrUndefined',
@@ -155,7 +155,7 @@ function generateRanges(values, fieldType, rangeSize) {
* Split an ordered array of values into sub-arrays of the same type.
* Example: [0, 25, 'an', 'mac', 'zen'] -> [[0, 25], ['an', 'mac', 'zen']].
*/
-function splitValuesPerType(values) {
+export function splitValuesPerType(values) {
let tp = typeof values[0];
let changePos = [0];
let i = 1;
@@ -176,10 +176,10 @@ function splitValuesPerType(values) {
return typedValues;
}
-function getTypeFromFieldName(fieldName) {
+export function getTypeFromFieldName(fieldName) {
const fieldMeta = fieldName.split("_");
let elemType = undefined;
- for (fieldPart of fieldMeta) {
+ for (let fieldPart of fieldMeta) {
if (fieldPart == "int") {
elemType = "integer";
} else if (fieldPart == "dbl") {
@@ -202,7 +202,7 @@ function getTypeFromFieldName(fieldName) {
* in the 'queryValues' document: {values: [1, 15, 37, 72, 100], min: 1, max: 100}. The 'values'
* array is sorted.
*/
-function generateRangePredicates(field, queryValues, fieldType) {
+export function generateRangePredicates(field, queryValues, fieldType) {
const querySpecs = {"small": 0.001, "medium": 0.01, "large": 0.1};
const opOptions = [["$gt", "$lt"], ["$gt", "$lte"], ["$gte", "$lt"], ["$gte", "$lte"]];
@@ -239,7 +239,7 @@ function generateRangePredicates(field, queryValues, fieldType) {
ranges.forEach(function(range) {
assert(range.length == 2);
let [op1, op2] = opOptions[j];
- pred = makeRangePredicate(field, op1, range[0], op2, range[1]);
+ let pred = makeRangePredicate(field, op1, range[0], op2, range[1]);
const doc = {
"pipeline": [pred],
"qtype": qSize + " range",
@@ -249,7 +249,7 @@ function generateRangePredicates(field, queryValues, fieldType) {
};
docs.push(doc);
if (fieldType == 'array' && range[0] <= range[1]) {
- pred = makeRangePredicate(field, op1, range[0], op2, range[1], true);
+ let pred = makeRangePredicate(field, op1, range[0], op2, range[1], true);
const doc = {
"pipeline": [pred],
"qtype": qSize + " range",
@@ -269,7 +269,7 @@ function generateRangePredicates(field, queryValues, fieldType) {
/**
* Helper function to extract positions for a sample of size n from a collection.
*/
-function selectSamplePos(collSize, n) {
+export function selectSamplePos(collSize, n) {
let samplePos = [];
let step = Math.round(collSize / n);
let offset = n * step - collSize;
@@ -284,11 +284,11 @@ function selectSamplePos(collSize, n) {
return samplePos;
}
-function selectSample(coll, samplePos) {
+export function selectSample(coll, samplePos) {
return coll.aggregate([{$match: {"_id": {$in: samplePos}}}]).toArray();
}
-function selectFieldValues(sample, field) {
+export function selectFieldValues(sample, field) {
let values = [];
for (const doc of sample) {
values.push(doc[field]);
@@ -299,7 +299,7 @@ function selectFieldValues(sample, field) {
/**
 * Selects a few values from histogram bucket boundaries.
*/
-function selectHistogramBounds(statsColl, field, fieldType) {
+export function selectHistogramBounds(statsColl, field, fieldType) {
let values = [];
let stats = statsColl.find({"_id": field})[0];
// Specify which bucket bound to choose from each histogram type. The number is ratio of the
@@ -332,7 +332,7 @@ function selectHistogramBounds(statsColl, field, fieldType) {
* Extract min/max values from a field. The initial unwind phase extracts the values in case the
* field contains arrays.
*/
-function getMinMax(coll, field) {
+export function getMinMax(coll, field) {
const res = coll.aggregate([
{$unwind: field},
{$group: {_id: null, min: {$min: field}, max: {$max: field}}},
@@ -346,7 +346,7 @@ function getMinMax(coll, field) {
* Extract query values from an array of sample arrays. Select up to three values per array element.
 * [[1, 3, 5], [2, 4, 6, 8, 10], [100]] -> [1, 3, 5, 2, 6, 10, 100]
*/
-function selectArrayValues(nestedArray) {
+export function selectArrayValues(nestedArray) {
let values = [];
nestedArray.forEach(function(array) {
if (typeof array != "object") {
@@ -366,7 +366,7 @@ function selectArrayValues(nestedArray) {
return values;
}
-function selectOutOfRangeValues(minMaxDoc, fieldType) {
+export function selectOutOfRangeValues(minMaxDoc, fieldType) {
let values = [];
const validTypes = new Set(["integer", "double", "string", "date"]);
if (!validTypes.has(fieldType)) {
@@ -397,7 +397,7 @@ function selectOutOfRangeValues(minMaxDoc, fieldType) {
return values;
}
-function sortValues(values) {
+export function sortValues(values) {
let sortColl = db["sortColl"];
sortColl.drop();
for (const x of values) {
@@ -411,7 +411,7 @@ function sortValues(values) {
return sorted;
}
-function deduplicate(boundaries) {
+export function deduplicate(boundaries) {
let values = [boundaries[0]];
let i = 0;
while (i + 1 < boundaries.length) {
@@ -435,7 +435,7 @@ function deduplicate(boundaries) {
* values, min, and max for the respective field. Example:
* {"a": {values: [1, 15, 37, 72, 100], min: 1, max: 100}, "b": {...} }
*/
-function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) {
+export function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) {
const sample = selectSample(coll, samplePos);
let queryValues = {};
@@ -478,7 +478,7 @@ function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) {
* Query generation for a collection 'coll' with given fields and field types.
* The generation uses values from a collection sample with 'sampleSize'.
*/
-function generateQueries(fields, fieldTypes, queryValues) {
+export function generateQueries(fields, fieldTypes, queryValues) {
let testCases = [];
let i = 0;
while (i < fields.length) {
@@ -508,7 +508,7 @@ function generateQueries(fields, fieldTypes, queryValues) {
* - step: step to navigate through the testCases array
* - predicates: array of result predicate documents
*/
-function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, predicates) {
+export function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, predicates) {
assert.eq(curPos, chosenIds.length);
let i = (curPos == 0) ? 0 : chosenIds.at(-1) + 1;
@@ -548,7 +548,8 @@ function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, pre
* op: $and or $or
* comp: array of comparisons for predicate terms
*/
-function makeSingleFieldComplexPredicate(field, values, op, comp, predicates, isArray = false) {
+export function makeSingleFieldComplexPredicate(
+ field, values, op, comp, predicates, isArray = false) {
let terms = [];
for (let i = 0; i < comp.length; i++) {
terms.push({[field]: {[comp[i]]: values[i]}});
@@ -570,7 +571,7 @@ function makeSingleFieldComplexPredicate(field, values, op, comp, predicates, is
/**
* Make a single field DNF predicate.
*/
-function makeSingleFieldDNF(field, values, predicates) {
+export function makeSingleFieldDNF(field, values, predicates) {
let term1 = {"$and": [{[field]: {"$gt": values[0]}}, {[field]: {"$lt": values[1]}}]};
let term2 = {"$and": [{[field]: {"$gte": values[2]}}, {[field]: {"$lt": values[3]}}]};
@@ -586,7 +587,7 @@ function makeSingleFieldDNF(field, values, predicates) {
/**
* Generate single-field conjunctions and disjunctions using values from the 'queryValues' document.
*/
-function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates) {
+export function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates) {
let i = 0;
while (i < fields.length) {
const field = fields[i];
@@ -628,7 +629,7 @@ function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicat
* - single-field conjunctions and disjunctions with 2 and 4 terms.
* - single-field DNFs.
*/
-function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) {
+export function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) {
let predicates = [];
// Generate multi-field conjunctions.
let chosenFields = new Set();
@@ -643,7 +644,7 @@ function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) {
// Generate single-field disjunctions and conjunctions.
generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates);
- i = 0;
+ let i = 0;
for (let query of predicates) {
query["_id"] = i++;
}
diff --git a/jstests/query_golden/libs/projection_helpers.js b/jstests/query_golden/libs/projection_helpers.js
index caf02d103535b..bb2f770de4bc8 100644
--- a/jstests/query_golden/libs/projection_helpers.js
+++ b/jstests/query_golden/libs/projection_helpers.js
@@ -1,10 +1,11 @@
-load("jstests/query_golden/libs/utils.js");
+import {show} from "jstests/libs/golden_test.js";
+import {resetCollection} from "jstests/query_golden/libs/utils.js";
/**
* Drops 'coll' and re-populates it according to 'docs' and 'indexes'. Then, runs the specified
* projections against the collection and prints the results.
*/
-function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) {
+export function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) {
resetCollection(coll, docs, indexes);
for (const projectionSpec of projSpecs) {
@@ -17,7 +18,7 @@ function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) {
/**
* Returns some example docs with interesting values as paths "a", "a.b", and "a.b.c".
*/
-function getProjectionDocs() {
+export function getProjectionDocs() {
return [
//
// Simple documents without any arrays along "a.b.c".
@@ -135,7 +136,7 @@ function getProjectionDocs() {
* Similar to getProjectionDocs(), but a smaller list where the interesting values are just under
* the _id field.
*/
-function getIdProjectionDocs() {
+export function getIdProjectionDocs() {
return [
{_id: 1, x: 2},
{_id: {}, x: 1},
diff --git a/jstests/query_golden/libs/run_queries_ce.js b/jstests/query_golden/libs/run_queries_ce.js
index 8dbe838d6007e..b0244386646ab 100644
--- a/jstests/query_golden/libs/run_queries_ce.js
+++ b/jstests/query_golden/libs/run_queries_ce.js
@@ -1,21 +1,37 @@
-load("jstests/libs/ce_stats_utils.js");
-load("jstests/libs/optimizer_utils.js");
-load("jstests/query_golden/libs/compute_errors.js");
-load("jstests/query_golden/libs/generate_queries.js");
-
-function indexedStrategy(strategyName) {
+import {analyzeFields, getRootCE} from "jstests/libs/ce_stats_utils.js";
+import {
+ forceCE,
+ getPlanSkeleton,
+ navigateToRootNode,
+ round2
+} from "jstests/libs/optimizer_utils.js";
+import {
+ aggegateOptimizationTimesPerStrategy,
+ aggregateErrorsPerCategory,
+ aggregateErrorsPerStrategy,
+ populateErrorCollection,
+ printQueriesWithBadAccuracy,
+} from "jstests/query_golden/libs/compute_errors.js";
+import {
+ generateComplexPredicates,
+ generateQueries,
+ selectQueryValues,
+ selectSamplePos,
+} from "jstests/query_golden/libs/generate_queries.js";
+
+export function indexedStrategy(strategyName) {
return strategyName + "Idx";
}
-function timedExplain(coll, pipeline) {
+export function timedExplain(coll, pipeline) {
const t0 = Date.now();
- explain = coll.explain().aggregate(pipeline);
+ const explain = coll.explain().aggregate(pipeline);
const t1 = Date.now();
const duration = t1 - t0;
return {explain, duration};
}
-function getCE(pipeline, explain) {
+export function getCE(pipeline, explain) {
try {
return round2(getRootCE(explain));
} catch (e) {
@@ -28,7 +44,7 @@ function getCE(pipeline, explain) {
/**
* Run the query specified in the 'testcase' document with the CE 'strategy'.
*/
-function runAggregationWithCE(coll, testcase, strategy) {
+export function runAggregationWithCE(coll, testcase, strategy) {
let explain = {};
let duration = -1;
if (testcase["nReturned"] == null) {
@@ -40,10 +56,10 @@ function runAggregationWithCE(coll, testcase, strategy) {
}
testcase["nReturned"] = explain.executionStats.nReturned;
// Run explain without execution to measure optimization time. Ignore the explain.
- timedRes = timedExplain(coll, testcase.pipeline);
+ const timedRes = timedExplain(coll, testcase.pipeline);
duration = timedRes.duration;
} else {
- timedRes = timedExplain(coll, testcase.pipeline);
+ const timedRes = timedExplain(coll, testcase.pipeline);
explain = timedRes.explain;
duration = timedRes.duration;
}
@@ -57,7 +73,7 @@ function runAggregationWithCE(coll, testcase, strategy) {
/**
* Run queries with complex predicates in batches with a limited number of index fields.
*/
-function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) {
+export function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) {
const maxIndexCnt = 50;
let start = 0;
@@ -103,7 +119,7 @@ function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) {
* If 'fields' is not empty, create index for each field and execute all queries on this field.
* If 'fields' is empty, execute queries with complex predicates in batches.
*/
-function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) {
+export function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) {
print("Run queries without indexing.\n");
ceStrategies.forEach(function(strategy) {
forceCE(strategy);
@@ -131,7 +147,7 @@ function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) {
}
}
-function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) {
+export function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) {
jsTestLog("Aggregate errors for all simple predicate queries");
// Aggregate errors for all CE strategies per query category.
@@ -165,7 +181,7 @@ function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) {
}
}
-function printComplexQueryStats(errorColl, strategies, queries, debugFlag) {
+export function printComplexQueryStats(errorColl, strategies, queries, debugFlag) {
jsTestLog("Aggregate errors for all complex predicate queries");
// Aggregate errors for all CE strategies per query category.
aggregateErrorsPerCategory(errorColl, ["qtype"], strategies);
@@ -182,7 +198,7 @@ function printComplexQueryStats(errorColl, strategies, queries, debugFlag) {
}
}
-function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) {
+export function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) {
jsTestLog("Aggregate errors for all queries (simple and complex predicates)");
let allErrorsColl = testDB.ce_all_errors;
allErrorsColl.drop();
@@ -196,7 +212,7 @@ function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) {
* collection metadata. The function assumes that the collection exists and is populated with data.
* 'sampleSize' is the number of documents used to extract sample values for query generation.
*/
-function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = false) {
+export function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = false) {
let ceStrategies = ["heuristic", "histogram"];
if (ceDebugFlag) {
ceStrategies.push("sampling");
@@ -216,8 +232,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag =
}
// Switch to 'tryBonsai' to create statistics and generate queries.
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
analyzeFields(testDB, coll, fields);
const statsColl = testDB.system.statistics[collName];
@@ -253,8 +269,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag =
runQueries(coll, complexPred, ceStrategies, [], ceDebugFlag);
// Switch to 'tryBonsai' for accuracy analysis.
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+ assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
let allStrategies = [];
for (let strategy of ceStrategies) {
@@ -289,8 +305,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag =
testDB.createView('ce_errors_complex_pred_not_empty',
'ce_errors_complex_pred',
[{$match: {$expr: {$gt: ["$nReturned", 0]}}}]);
- errorCollNonEmpty = testDB.ce_errors_not_empty;
- errorCollComplexPredNonEmpty = testDB.ce_errors_complex_pred_not_empty;
+ const errorCollNonEmpty = testDB.ce_errors_not_empty;
+ const errorCollComplexPredNonEmpty = testDB.ce_errors_complex_pred_not_empty;
print(`Non-empty simple error entries: ${
errorCollNonEmpty.find().itcount()}; complex error entries: ${
errorCollComplexPredNonEmpty.find().itcount()}`);
diff --git a/jstests/query_golden/libs/utils.js b/jstests/query_golden/libs/utils.js
index fdf690e482135..9dcc8db45973e 100644
--- a/jstests/query_golden/libs/utils.js
+++ b/jstests/query_golden/libs/utils.js
@@ -1,10 +1,11 @@
-load("jstests/query_golden/libs/example_data.js");
+import {show} from "jstests/libs/golden_test.js";
+import {sequentialIds} from "jstests/query_golden/libs/example_data.js";
/**
* Drops 'coll' and repopulates it with 'docs' and 'indexes'. Sequential _ids are added to
* documents which do not have _id set.
*/
-function resetCollection(coll, docs, indexes = []) {
+export function resetCollection(coll, docs, indexes = []) {
coll.drop();
const docsWithIds = sequentialIds(docs);
diff --git a/jstests/query_golden/load_data.js b/jstests/query_golden/load_data.js
index b17101730085e..a6846066a444f 100644
--- a/jstests/query_golden/load_data.js
+++ b/jstests/query_golden/load_data.js
@@ -5,8 +5,8 @@
* ]
*/
-(function() {
load("jstests/libs/load_ce_test_data.js");
+import {runHistogramsTest} from "jstests/libs/ce_stats_utils.js";
const dbName = 'ce_accuracy_test';
const dataDir = 'jstests/query_golden/libs/data/';
@@ -35,10 +35,9 @@ for (const collMetadata of dbMetadata) {
print(`Actual cardinality: ${actualCard}\n`);
assert.eq(expectedCard, actualCard);
collMetadata.fields.forEach(function(fieldMetadata) {
- fieldName = fieldMetadata.fieldName;
+ const fieldName = fieldMetadata.fieldName;
const fieldCard = coll.find({}, {fieldName: 1}).itcount();
print(`card(${fieldName}) = ${fieldCard}\n`);
assert.eq(fieldCard, actualCard);
});
}
-})();
diff --git a/jstests/query_golden/match_with_and_or.js b/jstests/query_golden/match_with_and_or.js
index 85acbf16eecd4..a243d9512501f 100644
--- a/jstests/query_golden/match_with_and_or.js
+++ b/jstests/query_golden/match_with_and_or.js
@@ -1,10 +1,8 @@
/**
* Test $match with $and/$or is supported and returns correct results.
*/
-
-(function() {
-"use strict";
-load("jstests/query_golden/libs/utils.js");
+import {show} from "jstests/libs/golden_test.js";
+import {resetCollection} from "jstests/query_golden/libs/utils.js";
const coll = db.and_or_coll;
@@ -105,4 +103,3 @@ for (const op of operators) {
show(coll.aggregate(pipeline));
}
}
-}());
diff --git a/jstests/query_golden/match_with_exists.js b/jstests/query_golden/match_with_exists.js
index c2f64a1532eb9..f1c34bb2be4d7 100644
--- a/jstests/query_golden/match_with_exists.js
+++ b/jstests/query_golden/match_with_exists.js
@@ -1,9 +1,7 @@
/**
* Test $match with $exists is supported and returns correct results.
*/
-
-(function() {
-"use strict";
+import {show} from "jstests/libs/golden_test.js";
const coll = db.cqf_golden_match_with_exists;
@@ -66,4 +64,3 @@ runWithData(
{_id: 2, a: [2]},
],
[{'a': {$elemMatch: {$exists: true}}}, {'a': {$elemMatch: {$exists: false}}}]);
-})();
diff --git a/jstests/query_golden/match_with_in.js b/jstests/query_golden/match_with_in.js
index 849468aec2178..84c2346e6c6dd 100644
--- a/jstests/query_golden/match_with_in.js
+++ b/jstests/query_golden/match_with_in.js
@@ -1,9 +1,7 @@
/**
* Test $match with $in is supported and returns correct results.
*/
-
-(function() {
-"use strict";
+import {show} from "jstests/libs/golden_test.js";
const coll = db.cqf_golden_match_with_in;
coll.drop();
@@ -77,4 +75,3 @@ const testFilters = [
for (const filter of testFilters) {
runTest(filter);
}
-}());
diff --git a/jstests/query_golden/multiple_traverse_single_scan.js b/jstests/query_golden/multiple_traverse_single_scan.js
index 2eaff689c3bc4..a0f23bde3619c 100644
--- a/jstests/query_golden/multiple_traverse_single_scan.js
+++ b/jstests/query_golden/multiple_traverse_single_scan.js
@@ -5,8 +5,7 @@
*
* Reproduces SERVER-71524.
*/
-(function() {
-"use strict";
+import {show} from "jstests/libs/golden_test.js";
const coll = db.query_golden_multiple_traverse_single_scan;
coll.drop();
@@ -21,4 +20,3 @@ assert.commandWorked(coll.createIndex({a: 1}));
// An incorrect plan would force each index entry to match both predicates,
// returning an empty result-set.
show(coll.find({'a.x': 1, 'a.y': 1}, {_id: 0}));
-})();
diff --git a/jstests/query_golden/non_multikey_paths.js b/jstests/query_golden/non_multikey_paths.js
index 6a062aad53189..988bac0a216a4 100644
--- a/jstests/query_golden/non_multikey_paths.js
+++ b/jstests/query_golden/non_multikey_paths.js
@@ -7,10 +7,7 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For leftmostLeafStage
+import {leftmostLeafStage, prettyInterval} from "jstests/libs/optimizer_utils.js";
db.setLogLevel(4, "query");
@@ -61,5 +58,4 @@ run([{$match: {'one.one.one.one': 2}}]);
run([{$match: {'one.one.one.many': 2}}]);
run([{$match: {'many.one.one.one': 2}}]);
run([{$match: {'many.one.one.many': 2}}]);
-run([{$match: {'many.many.many.many': 2}}]);
-})();
\ No newline at end of file
+run([{$match: {'many.many.many.many': 2}}]);
\ No newline at end of file
diff --git a/jstests/query_golden/not_pushdown.js b/jstests/query_golden/not_pushdown.js
index 9b43725d83f6f..b626356892ca3 100644
--- a/jstests/query_golden/not_pushdown.js
+++ b/jstests/query_golden/not_pushdown.js
@@ -7,10 +7,7 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For leftmostLeafStage
+import {findSubtrees} from "jstests/libs/optimizer_utils.js";
const coll = db.cqf_not_pushdown;
coll.drop();
@@ -65,5 +62,4 @@ run('Should stay as Not Traverse Eq', [{$match: {'many.many.many.many': {$ne: 7}
// We have an $elemMatch (multikey), but no Traverse underneath the Not.
run('Should be optimized to Neq', [{$match: {'many': {$elemMatch: {'one.one.one': {$ne: 7}}}}}]);
-run('Should be optimized to Neq', [{$match: {'many.one': {$elemMatch: {'one.one': {$ne: 7}}}}}]);
-})();
\ No newline at end of file
+run('Should be optimized to Neq', [{$match: {'many.one': {$elemMatch: {'one.one': {$ne: 7}}}}}]);
\ No newline at end of file
diff --git a/jstests/query_golden/null_missing.js b/jstests/query_golden/null_missing.js
index 80d41924db6dd..01bad7370edcb 100644
--- a/jstests/query_golden/null_missing.js
+++ b/jstests/query_golden/null_missing.js
@@ -7,10 +7,7 @@
* requires_cqf,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton.
+import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js";
db.setLogLevel(4, "query");
@@ -42,5 +39,4 @@ const pipeline = [{$match: {'a.b': null}}];
print(`nReturned: ${explain.executionStats.nReturned}\n`);
print(`Plan skeleton: `);
printjson(getPlanSkeleton(explain));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/replsets/all_commands_downgrading_to_upgraded.js b/jstests/replsets/all_commands_downgrading_to_upgraded.js
index 9460d090927fb..dcf9db222036a 100644
--- a/jstests/replsets/all_commands_downgrading_to_upgraded.js
+++ b/jstests/replsets/all_commands_downgrading_to_upgraded.js
@@ -10,13 +10,10 @@
* ]
*/
-(function() {
-"use strict";
-
// This will verify the completeness of our map and run all tests.
load("jstests/libs/all_commands_test.js");
-load("jstests/libs/fixture_helpers.js"); // For isSharded and isReplSet
-load("jstests/libs/feature_flag_util.js"); // For isPresentAndEnabled
+load("jstests/libs/fixture_helpers.js"); // For isSharded and isReplSet
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
load('jstests/replsets/rslib.js');
const name = jsTestName();
@@ -29,6 +26,7 @@ const isAnInternalCommand = "internal command";
const isDeprecated = "deprecated command";
const commandIsDisabledOnLastLTS = "skip command on downgrading fcv";
const requiresParallelShell = "requires parallel shell";
+const cannotRunWhileDowngrading = "cannot run command while downgrading";
const allCommands = {
_addShard: {skip: isAnInternalCommand},
@@ -65,15 +63,16 @@ const allCommands = {
_configsvrRemoveShardFromZone: {skip: isAnInternalCommand},
_configsvrRemoveTags: {skip: isAnInternalCommand},
_configsvrRepairShardedCollectionChunksHistory: {skip: isAnInternalCommand},
- _configsvrRenameCollectionMetadata: {skip: isAnInternalCommand},
+ _configsvrResetPlacementHistory: {skip: isAnInternalCommand},
_configsvrReshardCollection: {skip: isAnInternalCommand},
_configsvrRunRestore: {skip: isAnInternalCommand},
_configsvrSetAllowMigrations: {skip: isAnInternalCommand},
_configsvrSetClusterParameter: {skip: isAnInternalCommand},
_configsvrSetUserWriteBlockMode: {skip: isAnInternalCommand},
- _configsvrTransitionToCatalogShard: {skip: isAnInternalCommand},
+ _configsvrTransitionFromDedicatedConfigServer: {skip: isAnInternalCommand},
_configsvrTransitionToDedicatedConfigServer: {skip: isAnInternalCommand},
_configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand},
+ _dropConnectionsToMongot: {skip: isAnInternalCommand},
_flushDatabaseCacheUpdates: {skip: isAnInternalCommand},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isAnInternalCommand},
_flushReshardingStateChange: {skip: isAnInternalCommand},
@@ -87,6 +86,7 @@ const allCommands = {
_killOperations: {skip: isAnInternalCommand},
_mergeAuthzCollections: {skip: isAnInternalCommand},
_migrateClone: {skip: isAnInternalCommand},
+ _mongotConnPoolStats: {skip: isAnInternalCommand},
_movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand},
_movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand},
_movePrimaryRecipientSyncData: {skip: isAnInternalCommand},
@@ -97,6 +97,7 @@ const allCommands = {
_recvChunkStatus: {skip: isAnInternalCommand},
_refreshQueryAnalyzerConfiguration: {skip: isAnInternalCommand},
_shardsvrAbortReshardCollection: {skip: isAnInternalCommand},
+ _shardsvrCleanupStructuredEncryptionData: {skip: isAnInternalCommand},
_shardsvrCleanupReshardCollection: {skip: isAnInternalCommand},
_shardsvrCloneCatalogData: {skip: isAnInternalCommand},
_shardsvrCompactStructuredEncryptionData: {skip: isAnInternalCommand},
@@ -106,8 +107,6 @@ const allCommands = {
_shardsvrDropCollection: {skip: isAnInternalCommand},
_shardsvrCreateCollection: {skip: isAnInternalCommand},
_shardsvrCreateGlobalIndex: {skip: isAnInternalCommand},
- // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS.
- _shardsvrDropCollectionIfUUIDNotMatching: {skip: isAnInternalCommand},
_shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isAnInternalCommand},
_shardsvrDropCollectionParticipant: {skip: isAnInternalCommand},
_shardsvrDropGlobalIndex: {skip: isAnInternalCommand},
@@ -144,7 +143,14 @@ const allCommands = {
_shardsvrParticipantBlock: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand},
- _startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamSample: {skip: isAnInternalCommand},
+ streams_stopStreamProcessor: {skip: isAnInternalCommand},
+ streams_listStreamProcessors: {skip: isAnInternalCommand},
+ streams_getMoreStreamSample: {skip: isAnInternalCommand},
+ streams_getStats: {skip: isAnInternalCommand},
+ streams_testOnlyInsert: {skip: isAnInternalCommand},
+ streams_getMetrics: {skip: isAnInternalCommand},
_transferMods: {skip: isAnInternalCommand},
_vectorClockPersist: {skip: isAnInternalCommand},
abortReshardCollection: {
@@ -219,11 +225,7 @@ const allCommands = {
assert.commandWorked(conn.getDB(dbName).runCommand({create: collName}));
},
command: {analyze: collName},
- expectFailure: true,
- expectedErrorCode: [
- 6660400,
- 6765500
- ], // Analyze command requires common query framework feature flag to be enabled.
+ checkFeatureFlag: "CommonQueryFramework",
teardown: function(conn) {
assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName}));
},
@@ -231,8 +233,6 @@ const allCommands = {
analyzeShardKey: {
// TODO SERVER-74867: Remove the skip once 7.0 is lastLTS.
skip: commandIsDisabledOnLastLTS,
- // TODO SERVER-67966: Remove check when this feature flag is removed.
- checkFeatureFlag: "AnalyzeShardKey",
setUp: function(conn) {
assert.commandWorked(conn.getDB(dbName).runCommand({create: collName}));
assert.commandWorked(
@@ -365,6 +365,7 @@ const allCommands = {
// operation.
skip: "requires additional setup through a failed resharding operation",
},
+ cleanupStructuredEncryptionData: {skip: "requires additional encrypted collection setup"},
clearJumboFlag: {
isShardedOnly: true,
fullScenario: function(conn, fixture) {
@@ -412,6 +413,7 @@ const allCommands = {
},
clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"},
clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"},
+ clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"},
clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"},
clusterCount: {skip: "already tested by 'count' tests on mongos"},
clusterDelete: {skip: "already tested by 'delete' tests on mongos"},
@@ -497,15 +499,13 @@ const allCommands = {
configureQueryAnalyzer: {
// TODO SERVER-74867: Remove the skip once 7.0 is lastLTS.
skip: commandIsDisabledOnLastLTS,
- // TODO SERVER-67966: Remove check when this feature flag is removed.
- checkFeatureFlag: "AnalyzeShardKey",
setUp: function(conn) {
assert.commandWorked(conn.getDB(dbName).runCommand({create: collName}));
for (let i = 0; i < 10; i++) {
assert.commandWorked(conn.getCollection(fullNs).insert({a: i}));
}
},
- command: {configureQueryAnalyzer: fullNs, mode: "full", sampleRate: 1},
+ command: {configureQueryAnalyzer: fullNs, mode: "full", samplesPerSecond: 1},
teardown: function(conn) {
assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName}));
},
@@ -1091,13 +1091,7 @@ const allCommands = {
},
},
movePrimary: {
- isShardedOnly: true,
- fullScenario: function(conn, fixture) {
- assert.commandWorked(conn.getDB(dbName).runCommand({create: collName}));
- assert.commandWorked(conn.getDB('admin').runCommand(
- {movePrimary: dbName, to: fixture.shard0.shardName}));
- assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName}));
- }
+ skip: cannotRunWhileDowngrading,
},
moveRange: {
isShardedOnly: true,
@@ -1315,6 +1309,11 @@ const allCommands = {
isAdminCommand: true,
command: {replSetResizeOplog: 1, minRetentionHours: 1},
},
+ resetPlacementHistory: {
+ command: {resetPlacementHistory: 1},
+ isShardedOnly: true,
+ isAdminCommand: true,
+ },
reshardCollection: {
// TODO SERVER-74867: Remove the skip once 7.0 is lastLTS.
skip: commandIsDisabledOnLastLTS,
@@ -1541,20 +1540,16 @@ const allCommands = {
isAdminCommand: true,
doesNotRunOnMongos: true,
},
- transitionToCatalogShard: {
+ transitionFromDedicatedConfigServer: {
// TODO SERVER-74867: Remove the skip once 7.0 is lastLTS.
skip: commandIsDisabledOnLastLTS,
- // TODO SERVER-66060: Remove check when this feature flag is removed.
- checkFeatureFlag: "CatalogShard",
- command: {transitionToCatalogShard: 1},
+ command: {transitionFromDedicatedConfigServer: 1},
isShardedOnly: true,
isAdminCommand: true,
},
transitionToDedicatedConfigServer: {
// TODO SERVER-74867: Remove the skip once 7.0 is lastLTS.
skip: commandIsDisabledOnLastLTS,
- // TODO SERVER-66060: Remove check when this feature flag is removed.
- checkFeatureFlag: "CatalogShard",
command: {transitionToDedicatedConfigServer: 1},
isShardedOnly: true,
isAdminCommand: true,
@@ -1767,11 +1762,6 @@ let runAllCommands = function(command, test, conn, fixture) {
};
let runTest = function(conn, adminDB, fixture) {
- let runDowngradingToUpgrading = false;
- if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) {
- runDowngradingToUpgrading = true;
- }
-
assert.commandFailed(conn.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
jsTestLog("Running all commands in the downgradingToLastLTS FCV");
@@ -1798,30 +1788,28 @@ let runTest = function(conn, adminDB, fixture) {
runAllCommands(command, test, conn, fixture);
}
- if (runDowngradingToUpgrading) {
- assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
-
- jsTestLog("Running all commands after upgrading back to the latest FCV");
- commandsList = AllCommandsTest.checkCommandCoverage(conn, allCommands);
- if (isMongos(adminDB)) {
- let shardCommandsList =
- AllCommandsTest.checkCommandCoverage(fixture.shard0.rs.getPrimary(), allCommands);
- commandsList = new Set(commandsList.concat(shardCommandsList));
- }
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- for (const command of commandsList) {
- const test = allCommands[command];
+ jsTestLog("Running all commands after upgrading back to the latest FCV");
+ commandsList = AllCommandsTest.checkCommandCoverage(conn, allCommands);
+ if (isMongos(adminDB)) {
+ let shardCommandsList =
+ AllCommandsTest.checkCommandCoverage(fixture.shard0.rs.getPrimary(), allCommands);
+ commandsList = new Set(commandsList.concat(shardCommandsList));
+ }
- // Coverage already guaranteed above, but check again just in case.
- assert(test, "Coverage failure: must explicitly define a test for " + command);
+ for (const command of commandsList) {
+ const test = allCommands[command];
- if (test.skip !== undefined) {
- jsTestLog("Skipping " + command + ": " + test.skip);
- continue;
- }
+ // Coverage already guaranteed above, but check again just in case.
+ assert(test, "Coverage failure: must explicitly define a test for " + command);
- runAllCommands(command, test, conn, fixture);
+ if (test.skip !== undefined) {
+ jsTestLog("Skipping " + command + ": " + test.skip);
+ continue;
}
+
+ runAllCommands(command, test, conn, fixture);
}
};
@@ -1868,5 +1856,4 @@ let runShardedClusterTest = function() {
runStandaloneTest();
runReplicaSetTest();
-runShardedClusterTest();
-})();
+runShardedClusterTest();
\ No newline at end of file
diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js
index 7e59f76fe5d7a..648948647dc47 100644
--- a/jstests/replsets/apply_batches_totalMillis.js
+++ b/jstests/replsets/apply_batches_totalMillis.js
@@ -60,4 +60,4 @@ jsTestLog(`Time recorded after larger batch: ${timeAfterLarge}ms`);
assert.gte(timeAfterLarge, timeAfterSmall);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/apply_transaction_with_yield.js b/jstests/replsets/apply_transaction_with_yield.js
index b1705a3e3b9a3..5b9940cde13b4 100644
--- a/jstests/replsets/apply_transaction_with_yield.js
+++ b/jstests/replsets/apply_transaction_with_yield.js
@@ -41,4 +41,4 @@ session.commitTransaction();
replTest.awaitReplication();
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/arbiters_not_included_in_w3_wc.js b/jstests/replsets/arbiters_not_included_in_w3_wc.js
index aaf35cb450127..b4613094e8dd5 100644
--- a/jstests/replsets/arbiters_not_included_in_w3_wc.js
+++ b/jstests/replsets/arbiters_not_included_in_w3_wc.js
@@ -47,4 +47,4 @@ assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 3, wti
ErrorCodes.WriteConcernFailed);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 8e45f6b403e5f..9f95d74d1fd12 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -57,7 +57,7 @@ print("make sure user is written before shutting down");
MongoRunner.stopMongod(m);
print("start up rs");
-var rs = new ReplSetTest({"name": name, "nodes": 3});
+const rs = new ReplSetTest({"name": name, "nodes": 3});
// The first node is started with the pre-populated data directory.
print("start 0 with keyFile");
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index 35629b3a383e7..99425014e0213 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -3,7 +3,7 @@
'use strict';
var NODE_COUNT = 3;
-var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
+const rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
var nodes = rs.startSet();
rs.initiate();
diff --git a/jstests/replsets/awaitable_hello_errors_on_horizon_change.js b/jstests/replsets/awaitable_hello_errors_on_horizon_change.js
index 15b8ec66b0f58..e8923a2f04db7 100644
--- a/jstests/replsets/awaitable_hello_errors_on_horizon_change.js
+++ b/jstests/replsets/awaitable_hello_errors_on_horizon_change.js
@@ -133,4 +133,4 @@ runTest("hello");
runTest("isMaster");
runTest("ismaster");
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js
index a09138a9e194b..f56dbf3b8c66e 100644
--- a/jstests/replsets/background_index.js
+++ b/jstests/replsets/background_index.js
@@ -23,7 +23,7 @@ for (var i = 0; i < 100; i++) {
}
// Add a background index.
-coll.createIndex({x: 1}, {background: true});
+coll.createIndex({x: 1});
// Rename the collection.
assert.commandWorked(
diff --git a/jstests/replsets/bulk_write_command_wc.js b/jstests/replsets/bulk_write_command_wc.js
new file mode 100644
index 0000000000000..c305d326085e7
--- /dev/null
+++ b/jstests/replsets/bulk_write_command_wc.js
@@ -0,0 +1,194 @@
+/**
+ * Tests write-concern-related bulkWrite protocol functionality
+ *
+ * The test runs commands that are not allowed with a security token: bulkWrite.
+ * @tags: [
+ * assumes_against_mongod_not_mongos,
+ * not_allowed_with_security_token,
+ * command_not_supported_in_serverless,
+ * # TODO SERVER-52419 Remove this tag.
+ * featureFlagBulkWriteCommand,
+ * ]
+ */
+(function() {
+
+// Skip this test when running with storage engines other than inMemory, as the test relies on
+// journaling not being active.
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTest.log("Skipping test because it is only applicable for the inMemory storage engine");
+ return;
+}
+
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+jsTest.log("Starting no journal/repl set tests...");
+
+// Start a two-node replica set with no journal
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+var mongod = rst.getPrimary();
+var coll = mongod.getCollection("test.bulk_write_command_wc");
+
+//
+// Basic bulkWrite, default WC
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}]
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic bulkWrite, majority WC
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {w: 'majority'}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic bulkWrite, w:2 WC
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {w: 2}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic bulkWrite, immediate nojournal error
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {j: true}
+});
+printjson(result = mongod.adminCommand(request));
+assert(!result.ok);
+assert.eq(0, coll.find().itcount());
+
+//
+// Basic bulkWrite, timeout wc error
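+// w:3 can never be satisfied by the two-node set, so the insert succeeds locally but the
+// response carries a write concern timeout error.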
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {w: 3, wtimeout: 1}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert(result.writeConcernError);
+assert.eq(100, result.writeConcernError.code);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic bulkWrite, wmode wc error
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two ordered inserts, write error and wc error both reported
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}, {insert: 0, document: {_id: /a/}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(0, result.cursor.firstBatch[1].ok);
+assert.eq(1, result.cursor.firstBatch[1].idx);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two unordered inserts, write error and wc error reported
+coll.remove({});
+printjson(request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {a: 1}}, {insert: 0, document: {_id: /a/}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ ordered: false,
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = mongod.adminCommand(request));
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(0, result.cursor.firstBatch[1].ok);
+assert.eq(1, result.cursor.firstBatch[1].idx);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with empty writeConcern object.
+coll.remove({});
+request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {_id: 1}}, {insert: 0, document: {_id: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ ordered: false,
+ writeConcern: {}
+};
+result = mongod.adminCommand(request);
+assert(result.ok);
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(0, result.cursor.firstBatch[1].ok);
+assert.eq(1, result.cursor.firstBatch[1].idx);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with unspecified w.
+coll.remove({});
+request = {
+ bulkWrite: 1,
+ ops: [{insert: 0, document: {_id: 1}}, {insert: 0, document: {_id: 1}}],
+ nsInfo: [{ns: "test.bulk_write_command_wc"}],
+ ordered: false,
+ writeConcern: {wtimeout: 1}
+};
+result = assert.commandWorkedIgnoringWriteErrors(mongod.adminCommand(request));
+assert.eq(1, result.cursor.firstBatch[0].n);
+assert.eq(0, result.cursor.firstBatch[1].ok);
+assert.eq(1, result.cursor.firstBatch[1].idx);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+jsTest.log("DONE no journal/repl tests");
+rst.stopSet();
+})();
diff --git a/jstests/replsets/cluster_server_parameter_commands_replset.js b/jstests/replsets/cluster_server_parameter_commands_replset.js
index 79e1356f47c10..bf1d3bca12bcc 100644
--- a/jstests/replsets/cluster_server_parameter_commands_replset.js
+++ b/jstests/replsets/cluster_server_parameter_commands_replset.js
@@ -7,10 +7,11 @@
* multiversion_incompatible
* ]
*/
-(function() {
-'use strict';
-
-load('jstests/libs/cluster_server_parameter_utils.js');
+import {
+ setupReplicaSet,
+ testInvalidClusterParameterCommands,
+ testValidClusterParameterCommands,
+} from "jstests/libs/cluster_server_parameter_utils.js";
// Tests that set/getClusterParameter works on a non-sharded replica set.
const rst = new ReplSetTest({
@@ -29,5 +30,4 @@ testInvalidClusterParameterCommands(rst);
// majority of the nodes in the replica set.
testValidClusterParameterCommands(rst);
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/replsets/collection_clone_resume_after_network_error.js b/jstests/replsets/collection_clone_resume_after_network_error.js
index 0a800befc9083..ebac64c764799 100644
--- a/jstests/replsets/collection_clone_resume_after_network_error.js
+++ b/jstests/replsets/collection_clone_resume_after_network_error.js
@@ -26,7 +26,7 @@ function checkNoResumeAfter() {
// Verify the 'find' command received by the primary has resumeAfter set with the given recordId.
function checkHasResumeAfter(recordId) {
- checkLog.contains(primary, `"$_resumeAfter":{"$recordId":${recordId}}`);
+ checkLog.contains(primary, new RegExp(`"\\$_resumeAfter":\\{.*"\\$recordId":${recordId}.*\\}`));
}
const beforeRetryFailPointName = "hangBeforeRetryingClonerStage";
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index 8a8adffad5e35..75d06682da8a0 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -44,7 +44,7 @@ commands.push({
commands.push({
req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.getIndexes().length, 1);
},
confirmFunc: function() {
@@ -62,7 +62,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -80,7 +80,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -98,7 +98,7 @@ commands.push({
writeConcern: {w: 'majority'}
},
setupFunc: function() {
- coll.insert({type: 'oak'});
+ assert.commandWorked(coll.insert({type: 'oak'}));
assert.eq(coll.count({type: 'ginkgo'}), 0);
assert.eq(coll.count({type: 'oak'}), 1);
},
@@ -111,7 +111,7 @@ commands.push({
commands.push({
req: {applyOps: [{op: "u", ns: coll.getFullName(), o: {_id: 1, type: "willow"}, o2: {_id: 1}}]},
setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
+ assert.commandWorked(coll.insert({_id: 1, type: 'oak'}));
assert.eq(coll.count({type: 'willow'}), 0);
},
confirmFunc: function() {
@@ -141,15 +141,24 @@ commands.push({
});
},
reduce: function(key, values) {
- return {count: values.length};
+ // We may be re-reducing values that have already been partially reduced. In that case,
+            // we expect to see an object like {count: <number>} in the array of input values.
+ const numValues = values.reduce(function(acc, currentValue) {
+ if (typeof currentValue === "object") {
+ return acc + currentValue.count;
+ } else {
+ return acc + 1;
+ }
+ }, 0);
+ return {count: numValues};
},
out: "foo"
},
setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ assert.commandWorked(coll.insert({x: 1, tags: ["a", "b"]}));
+ assert.commandWorked(coll.insert({x: 2, tags: ["b", "c"]}));
+ assert.commandWorked(coll.insert({x: 3, tags: ["c", "a"]}));
+ assert.commandWorked(coll.insert({x: 4, tags: ["b", "c"]}));
},
confirmFunc: function() {
assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
diff --git a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
index 3502865b351fa..a841d83eb3a72 100644
--- a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
+++ b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
@@ -103,4 +103,4 @@ let res = secondary.getDB(dbName).getCollection(collName).find();
assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res);
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/config_txns_reaping_interrupt.js b/jstests/replsets/config_txns_reaping_interrupt.js
new file mode 100644
index 0000000000000..c4da081caf586
--- /dev/null
+++ b/jstests/replsets/config_txns_reaping_interrupt.js
@@ -0,0 +1,327 @@
+/*
+ * Tests that deleting a config.transactions document interrupts all transaction sessions
+ * it is associated with.
+ *
+ * @tags: [uses_transactions]
+ */
+(function() {
+"use strict";
+
+// This test implicitly writes to the config.transactions collection, which is not allowed under a
+// session.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/fail_point_util.js");
+load("jstests/libs/parallelTester.js");
+load("jstests/libs/uuid_util.js");
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+
+const dbName = "testDb";
+const collName = "testColl";
+const ns = dbName + "." + collName;
+const sessionColl = primary.getCollection("config.transactions");
+
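+// Helper run on a parallel Thread: opens a new connection and performs an insert under the given
+// logical session (optionally an internal session via lsid.txnNumber/lsid.txnUUID), either as a
+// retryable write or inside a transaction, so the main thread can act while the insert is hung.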
+function runInsert(host,
+ lsidUUIDString,
+ lsidTxnNumber,
+ lsidTxnUUIDString,
+ txnNumber,
+ dbName,
+ collName,
+ isRetryableWrite) {
+ const conn = new Mongo(host);
+ const lsid = {id: UUID(lsidUUIDString)};
+ if (lsidTxnNumber) {
+ lsid.txnNumber = NumberLong(lsidTxnNumber);
+ }
+ if (lsidTxnUUIDString) {
+ lsid.txnUUID = UUID(lsidTxnUUIDString);
+ }
+ const cmdObj = {
+ insert: collName,
+ documents: [{x: 2}],
+ lsid,
+        txnNumber: NumberLong(txnNumber),
+    };
+ if (isRetryableWrite || lsid.txnNumber) {
+ cmdObj.stmtId = NumberInt(2);
+ }
+ if (!isRetryableWrite) {
+ cmdObj.autocommit = false;
+ }
+ return conn.getDB(dbName).runCommand(cmdObj);
+}
+
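+// Commits one transaction, then starts a second operation on a related session and pauses it on
+// the 'hangDuringBatchInsert' failpoint. While it is paused, the config.transactions document of
+// the committed transaction is deleted, and the test verifies whether the paused operation is
+// interrupted (ErrorCodes.Interrupted) or allowed to complete, per 'expectInterrupt'.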
+function runTest({committedTxnOpts, inProgressTxnOpts, expectInterrupt}) {
+ jsTest.log("Testing " + tojson({committedTxnOpts, inProgressTxnOpts, expectInterrupt}));
+ // Start and commit a transaction.
+ const cmdObj0 = {
+ insert: collName,
+ documents: [{x: 0}],
+ lsid: committedTxnOpts.lsid,
+ txnNumber: NumberLong(committedTxnOpts.txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ };
+ if (committedTxnOpts.lsid.txnNumber) {
+ cmdObj0.stmtId = NumberInt(0);
+ }
+ assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj0));
+ assert.commandWorked(primary.adminCommand({
+ commitTransaction: 1,
+ lsid: committedTxnOpts.lsid,
+ txnNumber: NumberLong(committedTxnOpts.txnNumber),
+ autocommit: false
+ }));
+
+ // Start another transaction. Pause it after it has checked out the session.
+ const cmdObj1 = {
+ insert: collName,
+ documents: [{x: 1}],
+ lsid: inProgressTxnOpts.lsid,
+ txnNumber: NumberLong(inProgressTxnOpts.txnNumber),
+ };
+ if (inProgressTxnOpts.lsid.txnNumber || inProgressTxnOpts.isRetryableWrite) {
+ cmdObj1.stmtId = NumberInt(1);
+ }
+ if (!inProgressTxnOpts.isRetryableWrite) {
+ cmdObj1.startTransaction = true;
+ cmdObj1.autocommit = false;
+ }
+ assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj1));
+ const inProgressTxnThread = new Thread(
+ runInsert,
+ primary.host,
+ extractUUIDFromObject(inProgressTxnOpts.lsid.id),
+ inProgressTxnOpts.lsid.txnNumber ? inProgressTxnOpts.lsid.txnNumber.toNumber() : null,
+ inProgressTxnOpts.lsid.txnUUID ? extractUUIDFromObject(inProgressTxnOpts.lsid.txnUUID)
+ : null,
+ inProgressTxnOpts.txnNumber,
+ dbName,
+ collName,
+ inProgressTxnOpts.isRetryableWrite);
+ let fp = configureFailPoint(primary, "hangDuringBatchInsert", {shouldCheckForInterrupt: true});
+ inProgressTxnThread.start();
+
+ fp.wait();
+ // Delete the config.transactions document for the committed transaction.
+ assert.commandWorked(sessionColl.remove(makeLsidFilter(committedTxnOpts.lsid, "_id")));
+
+ fp.off();
+ const insertRes = inProgressTxnThread.returnData();
+ if (expectInterrupt) {
+ assert.commandFailedWithCode(insertRes, ErrorCodes.Interrupted);
+ } else {
+ assert.commandWorked(insertRes);
+ if (!inProgressTxnOpts.isRetryableWrite) {
+ assert.commandWorked(primary.adminCommand({
+ commitTransaction: 1,
+ lsid: inProgressTxnOpts.lsid,
+ txnNumber: NumberLong(inProgressTxnOpts.txnNumber),
+ autocommit: false
+ }));
+ }
+ }
+}
+
+jsTest.log("Test deleting config.transactions document for an external/client session");
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber},
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ expectInterrupt: true
+ });
+}
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber - 1},
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ expectInterrupt: true
+ });
+}
+
+jsTest.log("Test deleting config.transactions document for an internal session for a " +
+ "non-retryable write");
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ },
+ txnNumber: parentTxnNumber,
+ },
+ expectInterrupt: true
+ });
+}
+
+{
+ const parentLsid = {id: UUID()};
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ expectInterrupt: true
+ });
+}
+
+jsTest.log("Test deleting config.transactions document for an internal session for the current " +
+ "retryable write");
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber, isRetryableWrite: true},
+ expectInterrupt: true
+ });
+}
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ expectInterrupt: true
+ });
+}
+
+jsTest.log("Test deleting config.transactions document for an internal transaction for the " +
+ "previous retryable write (i.e. no interrupt is expected)");
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber - 1),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: parentLsid,
+ txnNumber: parentTxnNumber,
+ },
+ expectInterrupt: false
+ });
+}
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber - 1),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: parentLsid,
+ txnNumber: parentTxnNumber,
+ isRetryableWrite: true,
+ },
+ expectInterrupt: false
+ });
+}
+
+{
+ const parentLsid = {id: UUID()};
+ const parentTxnNumber = 1234;
+ runTest({
+ committedTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber - 1),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ inProgressTxnOpts: {
+ lsid: {
+ id: parentLsid.id,
+ txnNumber: NumberLong(parentTxnNumber),
+ txnUUID: UUID(),
+ },
+ txnNumber: 1,
+ },
+ expectInterrupt: false
+ });
+}
+
+rst.stopSet();
+})();
diff --git a/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js b/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js
index ea696d2b2b936..1e6dceeb71ee3 100644
--- a/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js
+++ b/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js
@@ -24,7 +24,7 @@ const rst = new ReplSetTest({
nodeOptions: {
setParameter: {
// This test requires a fixed ticket pool size.
- storageEngineConcurrencyAdjustmentAlgorithm: "",
+ storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions",
wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
wiredTigerConcurrentReadTransactions: kNumReadTickets,
logComponentVerbosity: tojson({storage: 1, command: 2})
diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js
index b055ad8c83493..098778c1d9a15 100644
--- a/jstests/replsets/db_reads_while_recovering_all_commands.js
+++ b/jstests/replsets/db_reads_while_recovering_all_commands.js
@@ -34,8 +34,8 @@ const allCommands = {
_configsvrBalancerStart: {skip: isPrimaryOnly},
_configsvrBalancerStatus: {skip: isPrimaryOnly},
_configsvrBalancerStop: {skip: isPrimaryOnly},
- _configsvrCheckClusterMetadataConsistency: {skip: isPrimaryOnly},
- _configsvrCheckMetadataConsistency: {skip: isPrimaryOnly},
+ _configsvrCheckClusterMetadataConsistency: {skip: isAnInternalCommand},
+ _configsvrCheckMetadataConsistency: {skip: isAnInternalCommand},
_configsvrCleanupReshardCollection: {skip: isPrimaryOnly},
_configsvrCollMod: {skip: isAnInternalCommand},
_configsvrClearJumboFlag: {skip: isPrimaryOnly},
@@ -58,15 +58,16 @@ const allCommands = {
_configsvrRemoveShardFromZone: {skip: isPrimaryOnly},
_configsvrRemoveTags: {skip: isPrimaryOnly},
_configsvrRepairShardedCollectionChunksHistory: {skip: isPrimaryOnly},
- _configsvrRenameCollectionMetadata: {skip: isPrimaryOnly},
+ _configsvrResetPlacementHistory: {skip: isPrimaryOnly},
_configsvrReshardCollection: {skip: isPrimaryOnly},
_configsvrRunRestore: {skip: isPrimaryOnly},
_configsvrSetAllowMigrations: {skip: isPrimaryOnly},
_configsvrSetClusterParameter: {skip: isPrimaryOnly},
_configsvrSetUserWriteBlockMode: {skip: isPrimaryOnly},
- _configsvrTransitionToCatalogShard: {skip: isPrimaryOnly},
+ _configsvrTransitionFromDedicatedConfigServer: {skip: isPrimaryOnly},
_configsvrTransitionToDedicatedConfigServer: {skip: isPrimaryOnly},
_configsvrUpdateZoneKeyRange: {skip: isPrimaryOnly},
+ _dropConnectionsToMongot: {skip: isAnInternalCommand},
_flushDatabaseCacheUpdates: {skip: isPrimaryOnly},
_flushDatabaseCacheUpdatesWithWriteConcern: {skip: isPrimaryOnly},
_flushReshardingStateChange: {skip: isPrimaryOnly},
@@ -80,6 +81,7 @@ const allCommands = {
_killOperations: {skip: isNotAUserDataRead},
_mergeAuthzCollections: {skip: isPrimaryOnly},
_migrateClone: {skip: isPrimaryOnly},
+ _mongotConnPoolStats: {skip: isAnInternalCommand},
_movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand},
_movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand},
_movePrimaryRecipientSyncData: {skip: isAnInternalCommand},
@@ -90,6 +92,7 @@ const allCommands = {
_recvChunkStatus: {skip: isPrimaryOnly},
_refreshQueryAnalyzerConfiguration: {skip: isPrimaryOnly},
_shardsvrAbortReshardCollection: {skip: isPrimaryOnly},
+ _shardsvrCleanupStructuredEncryptionData: {skip: isPrimaryOnly},
_shardsvrCleanupReshardCollection: {skip: isPrimaryOnly},
_shardsvrCloneCatalogData: {skip: isPrimaryOnly},
_shardsvrCompactStructuredEncryptionData: {skip: isPrimaryOnly},
@@ -100,8 +103,6 @@ const allCommands = {
_shardsvrDropGlobalIndex: {skip: isAnInternalCommand},
_shardsvrDropCollection: {skip: isPrimaryOnly},
_shardsvrCreateCollection: {skip: isPrimaryOnly},
- // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS.
- _shardsvrDropCollectionIfUUIDNotMatching: {skip: isNotAUserDataRead},
_shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isNotAUserDataRead},
_shardsvrDropCollectionParticipant: {skip: isPrimaryOnly},
_shardsvrDropIndexCatalogEntryParticipant: {skip: isPrimaryOnly},
@@ -137,7 +138,14 @@ const allCommands = {
_shardsvrParticipantBlock: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand},
_shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand},
- _startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamProcessor: {skip: isAnInternalCommand},
+ streams_startStreamSample: {skip: isAnInternalCommand},
+ streams_stopStreamProcessor: {skip: isAnInternalCommand},
+ streams_listStreamProcessors: {skip: isAnInternalCommand},
+ streams_getMoreStreamSample: {skip: isAnInternalCommand},
+ streams_getStats: {skip: isAnInternalCommand},
+ streams_testOnlyInsert: {skip: isAnInternalCommand},
+ streams_getMetrics: {skip: isAnInternalCommand},
_transferMods: {skip: isPrimaryOnly},
_vectorClockPersist: {skip: isPrimaryOnly},
abortReshardCollection: {skip: isPrimaryOnly},
@@ -165,10 +173,12 @@ const allCommands = {
checkShardingIndex: {skip: isPrimaryOnly},
cleanupOrphaned: {skip: isPrimaryOnly},
cleanupReshardCollection: {skip: isPrimaryOnly},
+ cleanupStructuredEncryptionData: {skip: isPrimaryOnly},
clearLog: {skip: isNotAUserDataRead},
cloneCollectionAsCapped: {skip: isPrimaryOnly},
clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"},
clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"},
+ clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"},
clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"},
clusterCount: {skip: "already tested by 'count' tests on mongos"},
clusterDelete: {skip: "already tested by 'delete' tests on mongos"},
@@ -378,6 +388,7 @@ const allCommands = {
replSetTestEgress: {skip: isNotAUserDataRead},
replSetUpdatePosition: {skip: isNotAUserDataRead},
replSetResizeOplog: {skip: isNotAUserDataRead},
+ resetPlacementHistory: {skip: isPrimaryOnly},
revokePrivilegesFromRole: {skip: isPrimaryOnly},
revokeRolesFromRole: {skip: isPrimaryOnly},
revokeRolesFromUser: {skip: isPrimaryOnly},
diff --git a/jstests/replsets/dbcheck_validation_mode_parameters.js b/jstests/replsets/dbcheck_validation_mode_parameters.js
new file mode 100644
index 0000000000000..c76ab0872ba3b
--- /dev/null
+++ b/jstests/replsets/dbcheck_validation_mode_parameters.js
@@ -0,0 +1,105 @@
+/**
+ * Test the validity of parameters in the dbCheck command.
+ *
+ * @tags: [
+ * requires_fcv_71
+ * ]
+ */
+
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+const dbName = "dbCheckValidationModeParameters";
+const colName = "dbCheckValidationModeParameters-collection";
+
+const replSet = new ReplSetTest({
+ name: jsTestName(),
+ nodes: 2,
+});
+replSet.startSet();
+replSet.initiateWithHighElectionTimeout();
+const primary = replSet.getPrimary();
+const db = primary.getDB(dbName);
+const col = db[colName];
+const nDocs = 1000;
+
+assert.commandWorked(col.insertMany([...Array(nDocs).keys()].map(x => ({a: x})), {ordered: false}));
+replSet.awaitReplication();
+
+function testFeatureFlagDisabled() {
+ jsTestLog("Testing dbCheck with feature flag disabled.");
+ // validateMode field is not allowed if feature flag is disabled.
+ assert.commandFailedWithCode(db.runCommand({
+ dbCheck: colName,
+ validateMode: "dataConsistency",
+ }),
+ ErrorCodes.InvalidOptions);
+ assert.commandWorked(db.runCommand({
+ dbCheck: colName,
+ }));
+}
+
+function testInvalidParameter() {
+ jsTestLog("Testing dbCheck with invalid parameters.");
+ // Unsupported enum passed in to validateMode field.
+ assert.commandFailedWithCode(db.runCommand({
+ dbCheck: colName,
+ validateMode: "invalidParam",
+ }),
+ ErrorCodes.BadValue);
+
+ // secondaryIndex field must be specified when validateMode is extraIndexKeysCheck.
+ assert.commandFailedWithCode(db.runCommand({
+ dbCheck: colName,
+ validateMode: "extraIndexKeysCheck",
+ }),
+ ErrorCodes.InvalidOptions);
+
+ // secondaryIndex field cannot be specified when validateMode is dataConsistency or
+ // dataConsistencyAndMissingIndexKeysCheck.
+ assert.commandFailedWithCode(db.runCommand({
+ dbCheck: colName,
+ validateMode: "dataConsistency",
+ secondaryIndex: "secondaryIndex",
+ }),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(db.runCommand({
+ dbCheck: colName,
+ validateMode: "dataConsistencyAndMissingIndexKeysCheck",
+ secondaryIndex: "secondaryIndex",
+ }),
+ ErrorCodes.InvalidOptions);
+}
+
+function testValidParameter() {
+ jsTestLog("Testing dbCheck with valid parameters.");
+ // dataConsistency is a supported enum for the validateMode field.
+ assert.commandWorked(db.runCommand({
+ dbCheck: colName,
+ validateMode: "dataConsistency",
+ }));
+
+    // dataConsistencyAndMissingIndexKeysCheck is a supported enum for the validateMode
+    // field.
+ assert.commandWorked(db.runCommand({
+ dbCheck: colName,
+ validateMode: "dataConsistencyAndMissingIndexKeysCheck",
+ }));
+
+ // extraIndexKeysCheck is a supported enum for the validateMode field.
+ assert.commandWorked(db.runCommand({
+ dbCheck: colName,
+ validateMode: "extraIndexKeysCheck",
+ secondaryIndex: "secondaryIndex",
+ }));
+}
+
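+// The validateMode/secondaryIndex options are gated behind the SecondaryIndexChecksInDbCheck
+// feature flag: run the full parameter validation tests when it is enabled, otherwise verify
+// that specifying validateMode is rejected.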
+const secondaryIndexChecks =
+ FeatureFlagUtil.isPresentAndEnabled(primary, "SecondaryIndexChecksInDbCheck");
+if (secondaryIndexChecks) {
+ testInvalidParameter();
+ testValidParameter();
+} else {
+ testFeatureFlagDisabled();
+}
+
+replSet.stopSet();
\ No newline at end of file
diff --git a/jstests/replsets/dbhash_lock_acquisition.js b/jstests/replsets/dbhash_lock_acquisition.js
index 1e7f3eafe349f..61a49b757d163 100644
--- a/jstests/replsets/dbhash_lock_acquisition.js
+++ b/jstests/replsets/dbhash_lock_acquisition.js
@@ -3,16 +3,9 @@
* resources when reading a timestamp using the $_internalReadAtClusterTime option.
*
* @tags: [
- * # Incompatible with all feature flags running on last continuous as dbHash runs lock-free in
- * # v7.0.
- * requires_fcv_70,
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
load("jstests/libs/parallelTester.js"); // for Thread
const rst = new ReplSetTest({nodes: 1});
@@ -22,13 +15,6 @@ rst.initiate();
const primary = rst.getPrimary();
const db = primary.getDB("test");
-if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- jsTestLog("Skipping test as dbHash is run lock-free with " +
- "the point-in-time catalog lookups feature flag enabled");
- rst.stopSet();
- return;
-}
-
const session = primary.startSession({causalConsistency: false});
const sessionDB = session.getDatabase(db.getName());
@@ -114,5 +100,4 @@ assert.commandWorked(threadCaptruncCmd.returnData());
assert.commandWorked(threadDBHash.returnData());
session.endSession();
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/replsets/dbhash_read_at_cluster_time.js b/jstests/replsets/dbhash_read_at_cluster_time.js
index 69635bb4b4062..7f2c21c6f125f 100644
--- a/jstests/replsets/dbhash_read_at_cluster_time.js
+++ b/jstests/replsets/dbhash_read_at_cluster_time.js
@@ -3,18 +3,10 @@
* read concern are supported by the "dbHash" command.
*
* @tags: [
- * # Incompatible with all feature flags running on last continuous as dbHash can have different
- * # behaviour in v7.0 when using point-in-time catalog lookups.
- * requires_fcv_70,
* requires_majority_read_concern,
* uses_transactions,
* ]
*/
-(function() {
-"use strict";
-
-load("jstests/libs/feature_flag_util.js");
-
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
@@ -177,21 +169,12 @@ assert.eq(atClusterTimeHashBefore,
const otherDB = otherSession.getDatabase("test");
// We create another collection inside a separate session to modify the collection catalog
- // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
- // associated with 'clusterTime' for snapshot reads if the point-in-time catalog lookups feature
- // flag is disabled.
+ // at an opTime later than 'clusterTime'.
assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
- if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) {
- assert.commandWorked(db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}));
- } else {
- assert.commandFailedWithCode(
- db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
- ErrorCodes.SnapshotUnavailable);
- }
+ assert.commandWorked(db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}));
otherSession.endSession();
}
session.endSession();
-rst.stopSet();
-})();
+rst.stopSet();
\ No newline at end of file
diff --git a/jstests/replsets/disabling_chaining_changes_sync_source.js b/jstests/replsets/disabling_chaining_changes_sync_source.js
index bca0f92f1cfec..b27e9eddef7ce 100644
--- a/jstests/replsets/disabling_chaining_changes_sync_source.js
+++ b/jstests/replsets/disabling_chaining_changes_sync_source.js
@@ -40,4 +40,4 @@ assert.commandWorked(primary.getDB("test").foo.insert({x: 1}));
replSet.awaitSyncSource(secondary, primary);
replSet.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/dont_set_invalid_rwconcern.js b/jstests/replsets/dont_set_invalid_rwconcern.js
index 9565ca70a60d1..913b17f3123b1 100644
--- a/jstests/replsets/dont_set_invalid_rwconcern.js
+++ b/jstests/replsets/dont_set_invalid_rwconcern.js
@@ -56,4 +56,4 @@ assert.commandFailedWithCode(coll.insert({a: 1}, {writeConcern: {w: "bajority"}}
assert.commandWorked(coll.insert({a: 1}, {writeConcern: {w: "multiRegion"}}));
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/drop_collections_two_phase_dbhash.js b/jstests/replsets/drop_collections_two_phase_dbhash.js
index 058a6f09aeb33..a2fa498c7c714 100644
--- a/jstests/replsets/drop_collections_two_phase_dbhash.js
+++ b/jstests/replsets/drop_collections_two_phase_dbhash.js
@@ -49,4 +49,4 @@ let failMsg = "dbHash during drop pending phase did not match dbHash after drop
assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg);
replTest.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_candidate_and_participant_metrics.js b/jstests/replsets/election_candidate_and_participant_metrics.js
index 20c75437c610e..4d4e71efb0629 100644
--- a/jstests/replsets/election_candidate_and_participant_metrics.js
+++ b/jstests/replsets/election_candidate_and_participant_metrics.js
@@ -244,4 +244,4 @@ assert.eq(
assert.eq(originalPrimaryElectionParticipantMetrics.priorityAtElection, 1);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_handoff_basic.js b/jstests/replsets/election_handoff_basic.js
index 2a4376689d896..883c7738cc4af 100644
--- a/jstests/replsets/election_handoff_basic.js
+++ b/jstests/replsets/election_handoff_basic.js
@@ -21,4 +21,4 @@ rst.initiateWithHighElectionTimeout();
ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_handoff_flip.js b/jstests/replsets/election_handoff_flip.js
index 8ce2b804ca8bc..17b21161e26ac 100644
--- a/jstests/replsets/election_handoff_flip.js
+++ b/jstests/replsets/election_handoff_flip.js
@@ -22,4 +22,4 @@ sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000);
ElectionHandoffTest.testElectionHandoff(rst, 1, 0);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_handoff_higher_priority.js b/jstests/replsets/election_handoff_higher_priority.js
index 12ac4914a40c2..a3fabee15d318 100644
--- a/jstests/replsets/election_handoff_higher_priority.js
+++ b/jstests/replsets/election_handoff_higher_priority.js
@@ -30,4 +30,4 @@ rst.initiate(config);
ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_handoff_one_unelectable.js b/jstests/replsets/election_handoff_one_unelectable.js
index 970b605197c46..79e4571ac213f 100644
--- a/jstests/replsets/election_handoff_one_unelectable.js
+++ b/jstests/replsets/election_handoff_one_unelectable.js
@@ -28,4 +28,4 @@ rst.initiate(config);
ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/election_participant_new_term_metrics.js b/jstests/replsets/election_participant_new_term_metrics.js
index aa76bdf4546e0..802df586a210c 100644
--- a/jstests/replsets/election_participant_new_term_metrics.js
+++ b/jstests/replsets/election_participant_new_term_metrics.js
@@ -106,4 +106,4 @@ assert(!testNodeElectionParticipantMetrics.newTermAppliedDate,
tojson(testNodeElectionParticipantMetrics));
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/force_reconfig_skips_config_replication.js b/jstests/replsets/force_reconfig_skips_config_replication.js
index fe3c10308f097..65d2f6871f918 100644
--- a/jstests/replsets/force_reconfig_skips_config_replication.js
+++ b/jstests/replsets/force_reconfig_skips_config_replication.js
@@ -48,4 +48,4 @@ secondary.reconnect(primary);
replTest.awaitNodesAgreeOnConfigVersion();
replTest.stopSet();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
index 22935531bcf27..88b0b5ffc3b60 100644
--- a/jstests/replsets/groupAndMapReduce.js
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -1,6 +1,6 @@
load("jstests/replsets/rslib.js");
-doTest = function(signal) {
+let doTest = function(signal) {
// Test basic replica set functionality.
// -- Replication
// -- Failover
@@ -32,7 +32,7 @@ doTest = function(signal) {
// and secondaries in the set and wait until the change has replicated.
replTest.awaitReplication();
- secondaries = replTest.getSecondaries();
+ let secondaries = replTest.getSecondaries();
assert(secondaries.length == 2, "Expected 2 secondaries but length was " + secondaries.length);
secondaries.forEach(function(secondary) {
// try to read from secondary
@@ -47,10 +47,10 @@ doTest = function(signal) {
print("Calling inline mr() with secondaryOk=true, must succeed");
secondary.setSecondaryOk();
- map = function() {
+ let map = function() {
emit(this.a, 1);
};
- reduce = function(key, vals) {
+ let reduce = function(key, vals) {
var sum = 0;
for (var i = 0; i < vals.length; ++i) {
sum += vals[i];
diff --git a/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js b/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js
index 309fb7c134487..47ba466472bd6 100644
--- a/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js
+++ b/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js
@@ -56,4 +56,4 @@ function runTest(hasArbiter) {
runTest(false /* hasArbiter */);
runTest(true /* hasArbiter */);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js
index 0400621b6b21f..ac6bcab2a37b4 100644
--- a/jstests/replsets/initial_sync_capped_index.js
+++ b/jstests/replsets/initial_sync_capped_index.js
@@ -22,6 +22,10 @@
* 8. Once initial sync completes, ensure that capped collection indexes on the SECONDARY are valid.
*
* This is a regression test for SERVER-29197.
+ *
+ * @tags: [
+ * uses_full_validation,
+ * ]
*/
(function() {
"use strict";
diff --git a/jstests/replsets/initial_sync_chooses_correct_sync_source.js b/jstests/replsets/initial_sync_chooses_correct_sync_source.js
index bc44457e84e63..5fd6327e57554 100644
--- a/jstests/replsets/initial_sync_chooses_correct_sync_source.js
+++ b/jstests/replsets/initial_sync_chooses_correct_sync_source.js
@@ -48,6 +48,11 @@ const restartAndWaitForHeartbeats = (rst, initialSyncNode, setParameterOpts = {}
setParameter: setParameterOpts,
});
+    // Wait for the restarted node to reach initial sync, then wait for heartbeats. This prevents
+    // a potential race where we wait for heartbeats during startup recovery, which satisfies the
+    // JS test, but the node then restarts heartbeats and treats the other nodes as DOWN when
+    // entering initial sync.
+ rst.waitForState(initialSyncNode, ReplSetTest.State.STARTUP_2);
waitForHeartbeats(initialSyncNode);
};
diff --git a/jstests/replsets/initial_sync_clone_multikey.js b/jstests/replsets/initial_sync_clone_multikey.js
index 8c4416beeaae4..312a4f0151cd4 100644
--- a/jstests/replsets/initial_sync_clone_multikey.js
+++ b/jstests/replsets/initial_sync_clone_multikey.js
@@ -16,7 +16,7 @@ const primaryDB = primary.getDB(dbName);
jsTestLog("Creating the collection and an index.");
assert.commandWorked(primaryDB.createCollection(collName));
-assert.commandWorked(primaryDB[collName].createIndex({"x": 1}, {background: true}));
+assert.commandWorked(primaryDB[collName].createIndex({"x": 1}));
// Make the index multikey.
primaryDB[collName].insert({x: [1, 2]});
diff --git a/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js b/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js
index 9b79b67261137..c5f485d5f5fa1 100644
--- a/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js
+++ b/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js
@@ -118,15 +118,21 @@ assert.eq(1, rs.compareOpTimes(thirdCommitPointSecondary, secondCommitPointSecon
hangBeforeCompletingOplogFetching.off();
hangBeforeFinish.wait();
-// Verify that the initial sync node receives the commit point from the primary via oplog fetching.
+// Verify that the initial sync node receives the commit point from the primary, either via oplog
+// fetching or by a heartbeat. This will usually happen via oplog fetching but in some cases it is
+// possible that the OplogFetcher shuts down before this ever happens. See SERVER-76695 for details.
// We only assert that it is greater than or equal to the second commit point because it is possible
// for the commit point to not yet be advanced by the primary when we fetch the oplog entry.
-const commitPointInitialSyncNode = getLastCommittedOpTime(initialSyncNode);
-assert.gte(
- rs.compareOpTimes(commitPointInitialSyncNode, secondCommitPointPrimary),
- 0,
- `commit point on initial sync node should be at least as up-to-date as the second commit point: ${
- tojson(commitPointInitialSyncNode)}`);
+assert.soon(() => {
+ const commitPointInitialSyncNode = getLastCommittedOpTime(initialSyncNode);
+ // compareOpTimes will throw an error if given an invalid opTime, and if the
+ // node has not yet advanced its opTime it will still have the default one,
+ // which is invalid.
+ if (!globalThis.rs.isValidOpTime(commitPointInitialSyncNode)) {
+ return false;
+ }
+ return rs.compareOpTimes(commitPointInitialSyncNode, secondCommitPointPrimary) >= 0;
+}, `commit point on initial sync node should be at least as up-to-date as the second commit point`);
// Verify that the non-voting secondary has received the updated commit point via heartbeats from
// the initial sync node.
diff --git a/jstests/replsets/initial_sync_read_concern_no_oplog.js b/jstests/replsets/initial_sync_read_concern_no_oplog.js
index 29e908f5b804a..85bc46c810a5f 100644
--- a/jstests/replsets/initial_sync_read_concern_no_oplog.js
+++ b/jstests/replsets/initial_sync_read_concern_no_oplog.js
@@ -28,4 +28,4 @@ replSet.awaitReplication();
replSet.awaitSecondaryNodes();
replSet.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js b/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js
index 5e31305b07391..7a4136dc2620c 100644
--- a/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js
+++ b/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js
@@ -114,4 +114,4 @@ jsTestLog("secondary2 successfully replicated prepared transaction after initial
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
replSet.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/initial_sync_with_write_load.js b/jstests/replsets/initial_sync_with_write_load.js
index c696b347b0b86..7e885dc79025f 100644
--- a/jstests/replsets/initial_sync_with_write_load.js
+++ b/jstests/replsets/initial_sync_with_write_load.js
@@ -47,7 +47,7 @@ var work = function() {
assert.commandWorked(db.timeToStartTrigger.insert({_id: 1}));
while (true) {
- for (x = 0; x < 100; x++) {
+ for (let x = 0; x < 100; x++) {
db["a" + x].insert({a: x});
}
diff --git a/jstests/replsets/initial_sync_write_conflict.js b/jstests/replsets/initial_sync_write_conflict.js
index 1571c318d45f8..1aecfaf28b097 100644
--- a/jstests/replsets/initial_sync_write_conflict.js
+++ b/jstests/replsets/initial_sync_write_conflict.js
@@ -31,4 +31,4 @@ replSet.awaitSecondaryNodes();
// If the index table contains any entries pointing to invalid document(RecordID), then
// validateCollections called during replica stopSet will capture the index corruption and throw
// error.
-replSet.stopSet();
\ No newline at end of file
+replSet.stopSet();
diff --git a/jstests/replsets/internal_sessions_reaping_basic.js b/jstests/replsets/internal_sessions_reaping_basic.js
index adf6ec1021e9e..890b0f9bbf77b 100644
--- a/jstests/replsets/internal_sessions_reaping_basic.js
+++ b/jstests/replsets/internal_sessions_reaping_basic.js
@@ -3,7 +3,7 @@
* config.image_collection entries for a transaction session if the logical session that it
* corresponds to has expired and been removed from the config.system.sessions collection.
*
- * @tags: [requires_fcv_70, uses_transactions]
+ * @tags: [requires_fcv_60, uses_transactions]
*/
(function() {
@@ -24,7 +24,6 @@ const rst = new ReplSetTest({
replBatchLimitOperations: 1,
// Make transaction records expire immediately.
TransactionRecordMinimumLifetimeMinutes: 0,
- storeFindAndModifyImagesInSideCollection: true,
internalSessionsReapThreshold: 0
}
}
diff --git a/jstests/replsets/internal_sessions_reaping_interrupt.js b/jstests/replsets/internal_sessions_reaping_interrupt.js
index 827d21c8b9ee6..bc15bba12e496 100644
--- a/jstests/replsets/internal_sessions_reaping_interrupt.js
+++ b/jstests/replsets/internal_sessions_reaping_interrupt.js
@@ -17,7 +17,9 @@ const rst = new ReplSetTest({
ttlMonitorEnabled: false,
disableLogicalSessionCacheRefresh: false,
TransactionRecordMinimumLifetimeMinutes: 0,
- logicalSessionRefreshMillis
+ logicalSessionRefreshMillis,
+ // Make the eager reaping occur more frequently.
+ internalSessionsReapThreshold: 5,
}
}
});
diff --git a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
index 56f46b581550f..b4f29d454b9b8 100644
--- a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
+++ b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
@@ -109,4 +109,4 @@ assert.commandWorked(sessionDB.adminCommand({
}));
rst.stopSet();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/replsets/libs/basic_replset_test.js b/jstests/replsets/libs/basic_replset_test.js
new file mode 100644
index 0000000000000..38c1398755392
--- /dev/null
+++ b/jstests/replsets/libs/basic_replset_test.js
@@ -0,0 +1,132 @@
+load("jstests/replsets/rslib.js");
+load('jstests/replsets/libs/election_metrics.js');
+
+function basicReplsetTest(signal, ssl_options1, ssl_options2, ssl_name) {
+ // Test basic replica set functionality.
+ // -- Replication
+ // -- Failover
+
+ // Choose a name that is unique to the options specified.
+ // This is important because we are depending on a fresh replicaSetMonitor for each run;
+ // each differently-named replica set gets its own monitor.
+ // n0 and n1 get the same SSL config since there are 3 nodes but only 2 different configs
+ let replTest = new ReplSetTest({
+ name: 'testSet' + ssl_name,
+ nodes: {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2}
+ });
+
+ // call startSet() to start each mongod in the replica set
+ replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getPrimary to return a reference to the node that's been
+ // elected primary.
+ let primary = replTest.getPrimary();
+
+ // Check that both the 'called' and 'successful' fields of the 'electionTimeout' election reason
+ // counter have been incremented in serverStatus.
+ const primaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+ verifyServerStatusElectionReasonCounterValue(
+ primaryStatus.electionMetrics, "electionTimeout", 1);
+
+ // Ensure the primary logs an n-op to the oplog upon transitioning to primary.
+ assert.gt(primary.getDB("local").oplog.rs.count({op: 'n', o: {msg: 'new primary'}}), 0);
+
+    // Here's how to save a document to the primary
+ primary.getDB("foo").foo.save({a: 1000});
+
+ // This method will check the oplogs of the primary
+ // and secondaries in the set and wait until the change has replicated.
+ replTest.awaitReplication();
+
+ let cppconn = new Mongo(replTest.getURL()).getDB("foo");
+ assert.eq(1000, cppconn.foo.findOne().a, "cppconn 1");
+
+ {
+ // check c++ finding other servers
+ let temp = replTest.getURL();
+ temp = temp.substring(0, temp.lastIndexOf(","));
+ temp = new Mongo(temp).getDB("foo");
+ assert.eq(1000, temp.foo.findOne().a, "cppconn 1");
+ }
+
+ // Here's how to stop the primary node
+ let primaryId = replTest.getNodeId(primary);
+ replTest.stop(primaryId);
+
+ // Now let's see who the new primary is:
+ let newPrimary = replTest.getPrimary();
+
+ // Is the new primary the same as the old primary?
+ let newPrimaryId = replTest.getNodeId(newPrimary);
+
+ assert(primaryId != newPrimaryId, "Old primary shouldn't be equal to new primary.");
+
+ reconnect(cppconn);
+ assert.eq(1000, cppconn.foo.findOne().a, "cppconn 2");
+
+ // Now let's write some documents to the new primary
+ let bulk = newPrimary.getDB("bar").bar.initializeUnorderedBulkOp();
+ for (let i = 0; i < 1000; i++) {
+ bulk.insert({a: i});
+ }
+ bulk.execute();
+
+ // Here's how to restart the old primary node:
+ let secondary = replTest.restart(primaryId);
+
+ // Now, let's make sure that the old primary comes up as a secondary
+ assert.soon(function() {
+ let res = secondary.getDB("admin").runCommand({hello: 1});
+ printjson(res);
+ return res['ok'] == 1 && res['isWritablePrimary'] == false;
+ });
+
+ // And we need to make sure that the replset comes back up
+ assert.soon(function() {
+ let res = newPrimary.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(res);
+ return res.myState == 1;
+ });
+
+ // And that both secondary nodes have all the updates
+ newPrimary = replTest.getPrimary();
+ assert.eq(1000, newPrimary.getDB("bar").runCommand({count: "bar"}).n, "assumption 2");
+ replTest.awaitSecondaryNodes();
+ replTest.awaitReplication();
+
+ let secondaries = replTest.getSecondaries();
+ assert(secondaries.length == 2, "Expected 2 secondaries but length was " + secondaries.length);
+ secondaries.forEach(function(secondary) {
+ secondary.setSecondaryOk();
+ let count = secondary.getDB("bar").runCommand({count: "bar"});
+ printjson(count);
+ assert.eq(1000, count.n, "secondary count wrong: " + secondary);
+ });
+
+ // last error
+ primary = replTest.getPrimary();
+ secondaries = replTest.getSecondaries();
+
+ let db = primary.getDB("foo");
+ let t = db.foo;
+
+ let ts = secondaries.map(function(z) {
+ z.setSecondaryOk();
+ return z.getDB("foo").foo;
+ });
+
+ t.save({a: 1000});
+ t.createIndex({a: 1});
+ replTest.awaitReplication();
+
+ ts.forEach(function(z) {
+ assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo());
+ });
+
+ // Shut down the set and finish the test.
+ replTest.stopSet(signal);
+}
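The new basic_replset_test.js library is not invoked anywhere in this hunk; a hypothetical caller might look like the sketch below. The signal value and the empty SSL option objects are assumptions for illustration and are not taken from this patch.

load("jstests/replsets/libs/basic_replset_test.js");

// Run the basic replica set test with default (non-TLS) node options and shut the
// set down with SIGTERM (signal 15) when finished. All argument values here are
// illustrative assumptions.
basicReplsetTest(15, {} /* ssl_options1 */, {} /* ssl_options2 */, "" /* ssl_name */);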
diff --git a/jstests/replsets/libs/oplog_rollover_test.js b/jstests/replsets/libs/oplog_rollover_test.js
index 6de111fd88389..e03f593b09fae 100644
--- a/jstests/replsets/libs/oplog_rollover_test.js
+++ b/jstests/replsets/libs/oplog_rollover_test.js
@@ -7,7 +7,7 @@
load("jstests/libs/fail_point_util.js");
-function oplogRolloverTest(storageEngine, initialSyncMethod) {
+function oplogRolloverTest(storageEngine, initialSyncMethod, serverless = false) {
jsTestLog("Testing with storageEngine: " + storageEngine);
if (initialSyncMethod) {
jsTestLog(" and initial sync method: " + initialSyncMethod);
@@ -25,14 +25,20 @@ function oplogRolloverTest(storageEngine, initialSyncMethod) {
if (initialSyncMethod) {
parameters = Object.merge(parameters, {initialSyncMethod: initialSyncMethod});
}
- const replSet = new ReplSetTest({
+
+ let replSetOptions = {
// Set the syncdelay to 1s to speed up checkpointing.
nodeOptions: {
syncdelay: 1,
setParameter: parameters,
},
nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
+ };
+
+ if (serverless)
+ replSetOptions = Object.merge(replSetOptions, {serverless: true});
+
+ const replSet = new ReplSetTest(replSetOptions);
// Set max oplog size to 1MB.
replSet.startSet({storageEngine: storageEngine, oplogSize: 1});
replSet.initiate();
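The new 'serverless' parameter is opt-in; a caller that exercises it might look like the following sketch. The storage engine name and the omitted initial sync method are assumptions for illustration.

load("jstests/replsets/libs/oplog_rollover_test.js");

// Run the oplog rollover test against a serverless replica set. "wiredTiger" and the
// undefined initialSyncMethod are illustrative assumptions.
oplogRolloverTest("wiredTiger", undefined /* initialSyncMethod */, true /* serverless */);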
diff --git a/jstests/replsets/libs/prepare_failover_due_to_reconfig.js b/jstests/replsets/libs/prepare_failover_due_to_reconfig.js
index cf5b4e58fd869..c2fd990dafc00 100644
--- a/jstests/replsets/libs/prepare_failover_due_to_reconfig.js
+++ b/jstests/replsets/libs/prepare_failover_due_to_reconfig.js
@@ -69,4 +69,4 @@ var testPrepareFailoverDueToReconfig = function(name, reconfigOnPrimary) {
assert.docEq(newDoc, doc);
rst.stopSet();
-};
\ No newline at end of file
+};
diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js
index 59b0fba4e1418..ed7b9c77ba25c 100644
--- a/jstests/replsets/libs/rename_across_dbs.js
+++ b/jstests/replsets/libs/rename_across_dbs.js
@@ -165,6 +165,7 @@ var RenameAcrossDatabasesTest = function(options) {
_testLog('Checking oplogs and dbhashes after renaming collection.');
replTest.awaitReplication();
replTest.checkOplogs(testName);
+ replTest.checkPreImageCollection(testName);
replTest.checkReplicatedDataHashes(testName);
_testLog('Test completed. Stopping replica set.');
diff --git a/jstests/replsets/libs/rollback_resumable_index_build.js b/jstests/replsets/libs/rollback_resumable_index_build.js
index 94b64e67a4da0..9925b0b6941f4 100644
--- a/jstests/replsets/libs/rollback_resumable_index_build.js
+++ b/jstests/replsets/libs/rollback_resumable_index_build.js
@@ -79,8 +79,10 @@ const RollbackResumableIndexBuildTest = class {
rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(originalPrimary.adminCommand(
- {setParameter: 1, logComponentVerbosity: {index: 1, replication: {heartbeats: 0}}}));
+ assert.commandWorked(originalPrimary.adminCommand({
+ setParameter: 1,
+ logComponentVerbosity: {index: 1, replication: {election: 0, heartbeats: 0}},
+ }));
// Set internalQueryExecYieldIterations to 0, internalIndexBuildBulkLoadYieldIterations to
// 1, and maxIndexBuildDrainBatchSize to 1 so that the index builds are guaranteed to yield
@@ -311,4 +313,4 @@ const RollbackResumableIndexBuildTest = class {
testInfo.buildUUIDs,
testInfo.indexNames);
}
-};
\ No newline at end of file
+};
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index 8a19b15b51db0..db58d73b3fb69 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -107,6 +107,34 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) {
// Make sure we have a replica set up and running.
replSet = (replSet === undefined) ? performStandardSetup(nodeOptions) : replSet;
+
+    // Create a helper function that sets a tenantId on commands if it is required.
+ let addTenantIdIfNeeded = (function() {
+ const adminDB = replSet.getPrimary().getDB("admin");
+ const flagDoc = assert.commandWorked(
+ adminDB.adminCommand({getParameter: 1, featureFlagRequireTenantID: 1}));
+ const multitenancyDoc =
+ assert.commandWorked(adminDB.adminCommand({getParameter: 1, multitenancySupport: 1}));
+ const fcvDoc = assert.commandWorked(
+ adminDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
+ if (multitenancyDoc.hasOwnProperty("multitenancySupport") &&
+ multitenancyDoc.multitenancySupport &&
+ flagDoc.hasOwnProperty("featureFlagRequireTenantID") &&
+ flagDoc.featureFlagRequireTenantID.value &&
+ MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version,
+ flagDoc.featureFlagRequireTenantID.version) >= 0) {
+ const tenantId = ObjectId();
+
+ return function(cmdObj) {
+ return Object.assign(cmdObj, {'$tenant': tenantId});
+ };
+ } else {
+ return function(cmdObj) {
+ return cmdObj;
+ };
+ }
+ })();
+
validateAndUseSetup(replSet);
// Majority writes in the initial phase, before transitionToRollbackOperations(), should be
@@ -181,9 +209,12 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) {
// ensure the insert was replicated and written to the on-disk journal of all 3
// nodes, with the exception of ephemeral and in-memory storage engines where
// journaling isn't supported.
- assert.commandWorked(curPrimary.getDB(dbName).ensureSyncSource.insert(
- {thisDocument: 'is inserted to ensure any node can sync from any other'},
- {writeConcern: {w: 3, j: config.writeConcernMajorityJournalDefault}}));
+
+ assert.commandWorked(curPrimary.getDB(dbName).runCommand(addTenantIdIfNeeded({
+ insert: "ensureSyncSource",
+ documents: [{thisDocument: 'is inserted to ensure any node can sync from any other'}],
+ writeConcern: {w: 3, j: config.writeConcernMajorityJournalDefault}
+ })));
}
/**
@@ -296,6 +327,10 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) {
return rst.getPrimary(ReplSetTest.kDefaultTimeoutMS, kRetryIntervalMS);
}
+ this.stepUpNode = function(conn) {
+ stepUp(conn);
+ };
+
function oplogTop(conn) {
return conn.getDB("local").oplog.rs.find().limit(1).sort({$natural: -1}).next();
}
@@ -450,10 +485,12 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) {
// ensure that this document is not lost due to unclean shutdowns. Ephemeral and in-memory
// storage engines are an exception because journaling isn't supported.
let writeConcern = TestData.rollbackShutdowns ? {w: 1, j: true} : {w: 1};
- let dbName = "EnsureThereIsAtLeastOneOperationToRollback";
- assert.commandWorked(curPrimary.getDB(dbName).ensureRollback.insert(
- {thisDocument: 'is inserted to ensure rollback is not skipped'},
- {writeConcern: writeConcern}));
+ let dbName = "EnsureThereIsAtLeastOneOpToRollback";
+ assert.commandWorked(curPrimary.getDB(dbName).runCommand(addTenantIdIfNeeded({
+ insert: "ensureRollback",
+ documents: [{thisDocument: 'is inserted to ensure rollback is not skipped'}],
+ writeConcern
+ })));
log(`Isolating the primary ${curPrimary.host} so it will step down`);
// We should have already disconnected the primary from the secondary during the first stage
diff --git a/jstests/replsets/libs/tenant_migration_recipient_sync_source.js b/jstests/replsets/libs/tenant_migration_recipient_sync_source.js
index 521fd89ecf58e..d273651afd474 100644
--- a/jstests/replsets/libs/tenant_migration_recipient_sync_source.js
+++ b/jstests/replsets/libs/tenant_migration_recipient_sync_source.js
@@ -3,7 +3,7 @@
*/
import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
-import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js";
+import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js";
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js");
@@ -40,7 +40,7 @@ export function setUpMigrationSyncSourceTest() {
const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst});
const tenantId = ObjectId().str;
- const tenantDB = tenantMigrationTest.tenantDB(tenantId, "DB");
+ const tenantDB = makeTenantDB(tenantId, "DB");
const collName = "testColl";
const donorPrimary = tenantMigrationTest.getDonorPrimary();
diff --git a/jstests/replsets/libs/tenant_migration_test.js b/jstests/replsets/libs/tenant_migration_test.js
index 270c443c8a10c..8e36b7234f307 100644
--- a/jstests/replsets/libs/tenant_migration_test.js
+++ b/jstests/replsets/libs/tenant_migration_test.js
@@ -10,12 +10,15 @@ import {
createTenantMigrationRecipientRoleIfNotExist,
createTenantMigrationDonorRoleIfNotExist,
runTenantMigrationCommand,
+ runDonorStartMigrationCommand,
isMigrationCompleted,
checkTenantDBHashes,
getExternalKeys,
isShardMergeEnabled,
isNamespaceForTenant,
getTenantMigrationAccessBlocker,
+ kProtocolShardMerge,
+ kProtocolMultitenantMigrations,
} from "jstests/replsets/libs/tenant_migration_util.js";
load("jstests/aggregation/extras/utils.js");
@@ -67,6 +70,9 @@ export class TenantMigrationTest {
* Make a new TenantMigrationTest
*
* @param {string} [name] the name of the replica sets
+     * @param {string} [protocol] the migration protocol to use, either "multitenant migrations" or
+     * "shard merge". If no value is provided, this defaults to "shard merge" when the shard merge
+     * feature flag is enabled and to "multitenant migrations" otherwise
* @param {boolean} [enableRecipientTesting] whether recipient would actually migrate tenant
* data
* @param {Object} [donorRst] the ReplSetTest instance to adopt for the donor
@@ -83,6 +89,7 @@ export class TenantMigrationTest {
*/
constructor({
name = "TenantMigrationTest",
+ protocol = "",
enableRecipientTesting = true,
donorRst,
recipientRst,
@@ -132,7 +139,8 @@ export class TenantMigrationTest {
tojson({mode: 'alwaysOn'});
}
- let nodeOptions = isDonor ? migrationX509Options.donor : migrationX509Options.recipient;
+ const nodeOptions =
+ isDonor ? migrationX509Options.donor : migrationX509Options.recipient;
nodeOptions["setParameter"] = setParameterOpts;
const rstName = `${name}_${(isDonor ? "donor" : "recipient")}`;
@@ -151,6 +159,19 @@ export class TenantMigrationTest {
this._recipientRst =
this._recipientPassedIn ? recipientRst : performSetUp(false /* isDonor */);
+ // If we don't pass "protocol" and shard merge is enabled, we set the protocol to
+ // "shard merge". Otherwise, the provided protocol is used, which defaults to
+ // "multitenant migrations" if not provided.
+ if (protocol === "" && isShardMergeEnabled(this.getDonorPrimary().getDB("admin"))) {
+ this.protocol = kProtocolShardMerge;
+ } else if (protocol === "") {
+ this.protocol = kProtocolMultitenantMigrations;
+ }
+
+ this.configRecipientsNs = this.protocol === kProtocolShardMerge
+ ? TenantMigrationTest.kConfigShardMergeRecipientsNS
+ : TenantMigrationTest.kConfigRecipientsNS;
+
this._donorRst.asCluster(this._donorRst.nodes, () => {
this._donorRst.getPrimary();
this._donorRst.awaitReplication();
@@ -228,12 +249,8 @@ export class TenantMigrationTest {
*
* Returns the result of the 'donorStartMigration' command.
*/
- startMigration(migrationOpts,
- {retryOnRetryableErrors = false, enableDonorStartMigrationFsync = false} = {}) {
- return this.runDonorStartMigration(migrationOpts, {
- retryOnRetryableErrors,
- enableDonorStartMigrationFsync,
- });
+ startMigration(migrationOpts, {retryOnRetryableErrors = false} = {}) {
+ return this.runDonorStartMigration(migrationOpts, {retryOnRetryableErrors});
}
/**
@@ -291,11 +308,9 @@ export class TenantMigrationTest {
const {
waitForMigrationToComplete = false,
retryOnRetryableErrors = false,
- enableDonorStartMigrationFsync = false,
} = opts;
- const cmdObj = {
- donorStartMigration: 1,
+ const migrationOpts = {
migrationId: UUID(migrationIdString),
tenantId,
tenantIds,
@@ -306,8 +321,7 @@ export class TenantMigrationTest {
protocol
};
- const stateRes = runTenantMigrationCommand(cmdObj, this.getDonorRst(), {
- enableDonorStartMigrationFsync,
+ const stateRes = runDonorStartMigrationCommand(migrationOpts, this.getDonorRst(), {
retryOnRetryableErrors,
shouldStopFunc: stateRes =>
(!waitForMigrationToComplete || isMigrationCompleted(stateRes))
@@ -344,27 +358,21 @@ export class TenantMigrationTest {
donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS).findOne({
_id: UUID(migrationIdString)
});
+
const recipientStateDoc =
- recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS).findOne({
+ recipientPrimary.getCollection(this.configRecipientsNs).findOne({
_id: UUID(migrationIdString)
});
- const shardMergeRecipientStateDoc =
- recipientPrimary.getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS)
- .findOne({_id: UUID(migrationIdString)});
-
if (donorStateDoc) {
assert(donorStateDoc.expireAt);
}
if (recipientStateDoc) {
assert(recipientStateDoc.expireAt);
}
- if (shardMergeRecipientStateDoc) {
- assert(shardMergeRecipientStateDoc.expireAt);
- }
const configDBCollections = recipientPrimary.getDB('config').getCollectionNames();
- assert(!configDBCollections.includes('repl.migration.oplog_' + migrationIdString),
+ assert(!configDBCollections.includes(`repl.migration.oplog_${migrationIdString}`),
configDBCollections);
this.getDonorRst().asCluster(donorPrimary, () => {
@@ -425,12 +433,7 @@ export class TenantMigrationTest {
});
recipientNodes.forEach(node => {
- const configRecipientsColl =
- node.getCollection(TenantMigrationTest.kConfigRecipientsNS);
- assert.soon(() => 0 === configRecipientsColl.count({_id: migrationId}), tojson(node));
-
- const configShardMergeRecipientsColl =
- node.getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS);
+ const configRecipientsColl = node.getCollection(this.configRecipientsNs);
assert.soon(() => 0 === configRecipientsColl.count({_id: migrationId}), tojson(node));
let mtab;
@@ -544,13 +547,8 @@ export class TenantMigrationTest {
expectedAccessState,
}) {
const configRecipientsColl =
- this.getRecipientPrimary().getCollection("config.tenantMigrationRecipients");
- let configDoc = configRecipientsColl.findOne({_id: migrationId});
- if (!configDoc) {
- configDoc = this.getRecipientPrimary()
- .getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS)
- .findOne({_id: migrationId});
- }
+ this.getRecipientPrimary().getCollection(this.configRecipientsNs);
+ const configDoc = configRecipientsColl.findOne({_id: migrationId});
const mtab = this.getTenantMigrationAccessBlocker({recipientNode: node, tenantId});
@@ -602,13 +600,6 @@ export class TenantMigrationTest {
() => (`${tojson(docsReturned)} is not equal to ${tojson(data)}`));
}
- /**
- * Crafts a tenant database name.
- */
- tenantDB(tenantId, dbName) {
- return `${tenantId}_${dbName}`;
- }
-
/**
* Returns the TenantMigrationAccessBlocker serverStatus output for the migration or shard merge
* for the given node.
@@ -715,8 +706,8 @@ TenantMigrationTest.DonorAccessState = {
};
TenantMigrationTest.RecipientAccessState = {
- kReject: "reject",
- kRejectBefore: "rejectBefore"
+ kRejectReadsAndWrites: "rejectReadsAndWrites",
+ kRejectReadsBefore: "rejectReadsBefore"
};
TenantMigrationTest.kConfigDonorsNS = "config.tenantMigrationDonors";
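With the new 'protocol' constructor parameter, a test can pin the migration protocol explicitly instead of relying on the feature-flag default. A minimal sketch, assuming an ES-module test file and an arbitrary test name:

import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js";
import {kProtocolMultitenantMigrations} from "jstests/replsets/libs/tenant_migration_util.js";

// Pin the protocol to "multitenant migrations" regardless of whether the shard merge
// feature flag is enabled; omitting 'protocol' keeps the default behavior described above.
const tenantMigrationTest =
    new TenantMigrationTest({name: jsTestName(), protocol: kProtocolMultitenantMigrations});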
diff --git a/jstests/replsets/libs/tenant_migration_util.js b/jstests/replsets/libs/tenant_migration_util.js
index 611ca24fb66c6..ed4e701665e99 100644
--- a/jstests/replsets/libs/tenant_migration_util.js
+++ b/jstests/replsets/libs/tenant_migration_util.js
@@ -2,6 +2,15 @@
* Utilities for testing tenant migrations.
*/
export const kExternalKeysNs = "config.external_validation_keys";
+export const kProtocolShardMerge = "shard merge";
+export const kProtocolMultitenantMigrations = "multitenant migrations";
+
+/**
+ * Crafts a tenant database name.
+ */
+export function makeTenantDB(tenantId, dbName) {
+ return `${tenantId}_${dbName}`;
+}
/**
* Returns true if feature flag 'featureFlagShardMerge' is enabled, false otherwise.
@@ -28,35 +37,6 @@ function shouldUseMergeTenantIds(db) {
return MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, "6.3") >= 0;
}
-/**
- * Construct a donorStartMigration command object with protocol: "shard merge" if the feature
- * flag is enabled.
- */
-export function donorStartMigrationWithProtocol(cmd, db) {
- // If we don't pass "protocol" and shard merge is enabled, we set the protocol to
- // "shard merge". Otherwise, the provided protocol is used, which defaults to
- // "multitenant migrations" if not provided.
- if (cmd["protocol"] === undefined && isShardMergeEnabled(db)) {
- const cmdCopy = Object.assign({}, cmd);
-
- if (shouldUseMergeTenantIds(db)) {
- cmdCopy.tenantIds = cmdCopy.tenantIds || [ObjectId(cmdCopy.tenantId)];
- }
-
- delete cmdCopy.tenantId;
- cmdCopy.protocol = "shard merge";
- return cmdCopy;
- } else if (cmd["protocol"] == "shard merge") {
- const cmdCopy = Object.assign({}, cmd);
- delete cmdCopy.tenantId;
- return cmdCopy;
- } else {
- const cmdCopy = Object.assign({}, cmd);
- delete cmdCopy.tenantIds;
- return cmdCopy;
- }
-}
-
/**
* Returns the external keys for the given migration id.
*/
@@ -138,35 +118,31 @@ export function isMigrationCompleted(res) {
* fixture.
*/
export async function runMigrationAsync(migrationOpts, donorRstArgs, opts = {}) {
- const {isMigrationCompleted, makeMigrationCertificatesForTest, runTenantMigrationCommand} =
+ const {isMigrationCompleted, makeMigrationCertificatesForTest, runDonorStartMigrationCommand} =
await import("jstests/replsets/libs/tenant_migration_util.js");
load("jstests/replsets/rslib.js"); // createRst
const {
retryOnRetryableErrors = false,
- enableDonorStartMigrationFsync = false,
} = opts;
const donorRst = createRst(donorRstArgs, retryOnRetryableErrors);
const migrationCertificates = makeMigrationCertificatesForTest();
- const cmdObj = {
- donorStartMigration: 1,
- migrationId: UUID(migrationOpts.migrationIdString),
- tenantId: migrationOpts.tenantId,
- tenantIds: eval(migrationOpts.tenantIds),
- recipientConnectionString: migrationOpts.recipientConnString,
- readPreference: migrationOpts.readPreference || {mode: "primary"},
- donorCertificateForRecipient: migrationOpts.donorCertificateForRecipient ||
- migrationCertificates.donorCertificateForRecipient,
- recipientCertificateForDonor: migrationOpts.recipientCertificateForDonor ||
- migrationCertificates.recipientCertificateForDonor,
- };
- return runTenantMigrationCommand(cmdObj, donorRst, {
- retryOnRetryableErrors,
- enableDonorStartMigrationFsync,
- shouldStopFunc: isMigrationCompleted
- });
+ return runDonorStartMigrationCommand(
+ {
+ migrationId: UUID(migrationOpts.migrationIdString),
+ tenantId: migrationOpts.tenantId,
+ tenantIds: eval(migrationOpts.tenantIds),
+ recipientConnectionString: migrationOpts.recipientConnString,
+ readPreference: migrationOpts.readPreference || {mode: "primary"},
+ donorCertificateForRecipient: migrationOpts.donorCertificateForRecipient ||
+ migrationCertificates.donorCertificateForRecipient,
+ recipientCertificateForDonor: migrationOpts.recipientCertificateForDonor ||
+ migrationCertificates.recipientCertificateForDonor,
+ },
+ donorRst,
+ {retryOnRetryableErrors, shouldStopFunc: isMigrationCompleted});
}
/**
@@ -215,6 +191,36 @@ export async function tryAbortMigrationAsync(
return runTenantMigrationCommand(cmdObj, donorRst, {retryOnRetryableErrors});
}
+/**
+ * Runs the donorStartMigration command against the primary of the provided replica set. Will
+ * automatically assign the correct 'protocol' and 'tenantId'/'tenantIds' based on the provided
+ * 'protocol' and/or currently enabled feature flags.
+ */
+export function runDonorStartMigrationCommand(migrationOpts, rst, {
+ retryOnRetryableErrors = false,
+ shouldStopFunc = () => true,
+} = {}) {
+ // If we don't pass "protocol" and shard merge is enabled, we set the protocol to
+ // "shard merge". Otherwise, the provided protocol is used, which defaults to
+ // "multitenant migrations" if not provided.
+ const db = rst.getPrimary().getDB("admin");
+ const cmd = Object.assign({donorStartMigration: 1}, migrationOpts);
+ if (cmd["protocol"] === undefined && isShardMergeEnabled(db)) {
+ if (shouldUseMergeTenantIds(db)) {
+ cmd.tenantIds = cmd.tenantIds || [ObjectId(cmd.tenantId)];
+ }
+
+ delete cmd.tenantId;
+ cmd.protocol = kProtocolShardMerge;
+ } else if (cmd["protocol"] == kProtocolShardMerge) {
+ delete cmd.tenantId;
+ } else {
+ delete cmd.tenantIds;
+ }
+
+ return runTenantMigrationCommand(cmd, rst, {retryOnRetryableErrors, shouldStopFunc});
+}
+
/**
* Runs the given tenant migration command against the primary of the given replica set until
* the command succeeds or fails with a non-retryable error (if 'retryOnRetryableErrors' is
@@ -223,52 +229,37 @@ export async function tryAbortMigrationAsync(
export function runTenantMigrationCommand(cmdObj, rst, {
retryOnRetryableErrors = false,
shouldStopFunc = () => true,
- enableDonorStartMigrationFsync = false
} = {}) {
let primary = rst.getPrimary();
- let localCmdObj = cmdObj;
- let run = () => primary.adminCommand(localCmdObj);
- if (Object.keys(cmdObj)[0] === "donorStartMigration") {
- run = () => {
- const adminDB = primary.getDB("admin");
- localCmdObj = donorStartMigrationWithProtocol(cmdObj, adminDB);
- if (enableDonorStartMigrationFsync) {
- rst.awaitLastOpCommitted();
- assert.commandWorked(primary.adminCommand({fsync: 1}));
- }
- return primary.adminCommand(localCmdObj);
- };
- }
-
let res;
assert.soon(() => {
try {
// Note: assert.commandWorked() considers command responses with embedded
// writeErrors and WriteConcernErrors as a failure even if the command returned
- // "ok: 1". And, admin commands(like, donorStartMigration)
- // doesn't generate writeConcernErros or WriteErrors. So, it's safe to wrap up
- // run() with assert.commandWorked() here. However, in few scenarios, like
- // Mongo.prototype.recordRerouteDueToTenantMigration(), it's not safe to wrap up
- // run() with commandWorked() as retrying on retryable writeConcernErrors can
- // cause the retry attempt to fail with writeErrors.
+                // "ok: 1". Admin commands (like donorStartMigration) don't generate
+                // WriteConcernErrors or WriteErrors, so it's safe to wrap the command invocation
+                // with assert.commandWorked() here. However, in a few scenarios, like
+                // Mongo.prototype.recordRerouteDueToTenantMigration(), it's not safe to wrap the
+                // command invocation with commandWorked(), as retrying on retryable
+                // WriteConcernErrors can cause the retry attempt to fail with writeErrors.
res = undefined;
// In some tests we expects the command to fail due to a network error. We want to
// catch the error OR the unhandled exception here and return the error to the
// caller to assert on the result. Otherwise if this is not a network exception
// it will be caught in the outter catch and either be retried or thrown.
- res = executeNoThrowNetworkError(() => run());
+ res = executeNoThrowNetworkError(() => primary.adminCommand(cmdObj));
assert.commandWorked(res);
return shouldStopFunc(res);
} catch (e) {
if (retryOnRetryableErrors && isRetryableError(e)) {
jsTestLog(`Retryable error running runTenantMigrationCommand. Command: ${
- tojson(localCmdObj)}, Error: ${tojson(e)}`);
+ tojson(cmdObj)}, Error: ${tojson(e)}`);
primary = rst.getPrimary();
return false;
}
jsTestLog(`Error running runTenantMigrationCommand. Command: ${
- tojson(localCmdObj)}, Error: ${tojson(e)}`);
+ tojson(cmdObj)}, Error: ${tojson(e)}`);
// If res is defined, return true to exit assert.soon and return res to the caller.
// Otherwise rethrow e to propagate it to the caller.
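A caller of the new runDonorStartMigrationCommand helper might look like the following sketch. The donor/recipient fixtures, tenant id, and read preference shown here are assumptions; depending on the cluster's TLS configuration the command may also need the migration certificate fields.

import {runDonorStartMigrationCommand} from "jstests/replsets/libs/tenant_migration_util.js";

// A minimal sketch of starting a migration with the new helper. 'donorRst' and
// 'recipientRst' are assumed to be already-initiated ReplSetTest fixtures.
function startMigrationSketch(donorRst, recipientRst) {
    const migrationOpts = {
        migrationId: UUID(),
        tenantId: ObjectId().str,
        recipientConnectionString: recipientRst.getURL(),
        readPreference: {mode: "primary"},
    };
    // The helper fills in 'protocol' (and chooses between tenantId and tenantIds) based on
    // the enabled feature flags before issuing donorStartMigration against the donor primary.
    return runDonorStartMigrationCommand(
        migrationOpts, donorRst, {retryOnRetryableErrors: true});
}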
diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js
index cff02a251931e..3c2ce3cbeb00e 100644
--- a/jstests/replsets/localhostAuthBypass.js
+++ b/jstests/replsets/localhostAuthBypass.js
@@ -107,7 +107,7 @@ var authenticate = function(mongo) {
};
var start = function(useHostName) {
- var rs = new ReplSetTest(
+ const rs = new ReplSetTest(
{name: replSetName, nodes: 3, keyFile: keyfile, auth: "", useHostName: useHostName});
rs.startSet();
@@ -126,7 +126,7 @@ var runTest = function(useHostName) {
print("=====================");
print("starting replica set: useHostName=" + useHostName);
print("=====================");
- var rs = start(useHostName);
+ const rs = start(useHostName);
var port = rs.getPort(rs.getPrimary());
var host = "localhost:" + port;
var secHosts = [];
@@ -190,7 +190,7 @@ var runNonlocalTest = function(ipAddr) {
print("starting mongod: non-local host access " + ipAddr);
print("==========================");
- var rs = start(false);
+ const rs = start(false);
var port = rs.getPort(rs.getPrimary());
var host = ipAddr + ":" + port;
var secHosts = [];
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
index c034ac404d87e..274b198e48cd8 100644
--- a/jstests/replsets/maintenance.js
+++ b/jstests/replsets/maintenance.js
@@ -23,7 +23,7 @@ assert.soon(function() {
return conns[1].getDB("admin").hello().secondary;
});
-join =
+let join =
startParallelShell("db.getSiblingDB('bar').runCommand({compact : 'foo'});", replTest.ports[1]);
print("joining");
@@ -81,7 +81,7 @@ assert.eq(recv.errmsg, "node is recovering");
print("now getmore shouldn't work");
var ex = assert.throws(function() {
- lastDoc = null;
+ let lastDoc = null;
while (cursor.hasNext()) {
lastDoc = cursor.next();
}
diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js
index fe5998395a0d9..baad3fc58afb6 100644
--- a/jstests/replsets/no_chaining.js
+++ b/jstests/replsets/no_chaining.js
@@ -66,4 +66,4 @@ if (!_isWindows()) {
assert.eq(false, config.settings.chainingAllowed, tojson(config));
}
-replTest.stopSet();
\ No newline at end of file
+replTest.stopSet();
diff --git a/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js
new file mode 100644
index 0000000000000..87881d2471a2e
--- /dev/null
+++ b/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js
@@ -0,0 +1,335 @@
+/**
+ * This file tests that if a user initiates a bulk write where the last write is a noop, either
+ * due to being a duplicate operation or due to an error based on data we read, we still wait for
+ * write concern.
+ * The intended behavior for a no-op write is that we advance the repl client's last optime to the
+ * optime of the newest entry in the oplog (also referred to as the "system optime"), and wait for
+ * write concern for that optime. This ensures that any writes we may have read that caused the
+ * operation to be a noop have also been replicated. For all of these tests, the optime-fixing
+ * behavior should be handled by LastOpFixer.
+ *
+ * @tags: [featureFlagBulkWriteCommand] // TODO SERVER-52419: Remove this tag.
+ */
+
+load("jstests/libs/parallel_shell_helpers.js");
+load("jstests/libs/fail_point_util.js");
+
+const name = jsTestName();
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ nodeOptions: {
+ setParameter: {
+ // Prevent inserts from being batched together. This allows
+ // us to hang between consecutive insert operations without
+ // blocking the ones we already processed from executing.
+ internalInsertMaxBatchSize: 1,
+ }
+ }
+});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+const dbName = 'testDB';
+const testDB = primary.getDB(dbName);
+const collName = 'testColl';
+const coll = testDB[collName];
+
+function dropTestCollection() {
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+// Each entry in this array contains a bulkWrite command noop write case we want to test.
+// Entries have the following structure:
+// {
+//
+// bulkReq: